Push version 2.1.1 to trunk.

[ES5] Implemented Object.defineProperty.
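
A minimal sketch of exercising the new ES5 support from an embedder,
assuming a HandleScope and an entered Context already exist (the JS
source and variable names are illustrative, not part of this change):

    v8::Handle<v8::String> source = v8::String::New(
        "var o = {};"
        "Object.defineProperty(o, 'x', { value: 42, writable: false });"
        "o.x;");
    v8::Handle<v8::Script> script = v8::Script::Compile(source);
    v8::Handle<v8::Value> result = script->Run();
    // result holds 42; plain assignments to o.x are now ignored.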

Improved profiler support.
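
A sketch of the extended profiler control added below, assuming a build
with ENABLE_LOGGING_AND_PROFILING; kMyProfileTag is a hypothetical
caller-chosen id used to group the samples from one interval:

    const int kMyProfileTag = 1;  // Hypothetical tag value.
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, kMyProfileTag);
    // ... run the code of interest ...
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, kMyProfileTag);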

Added a SetPrototype method to the public V8 API.
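
A sketch of the new v8::Object::SetPrototype call, assuming an entered
Context; per the implementation below it returns false when the change
fails, e.g. when the new prototype would introduce a cycle:

    v8::Handle<v8::Object> obj = v8::Object::New();
    v8::Handle<v8::Object> proto = v8::Object::New();
    bool ok = obj->SetPrototype(proto);  // false on a prototype cycle.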

Added GetScriptOrigin and GetScriptLineNumber methods to Function objects in the API.
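
A sketch of querying where a function was defined; fn stands for any
v8::Handle<v8::Function> obtained from previously compiled code:

    v8::ScriptOrigin origin = fn->GetScriptOrigin();
    v8::Handle<v8::Value> name = origin.ResourceName();  // Script name.
    int line = fn->GetScriptLineNumber();
    // line is Function::kLineOffsetNotFound if no script is attached.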

Performance improvements on all platforms.


git-svn-id: http://v8.googlecode.com/svn/trunk@3905 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index ebda77a..1a81cc7 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -72,6 +72,7 @@
     interpreter-irregexp.cc
     jsregexp.cc
     jump-target.cc
+    liveedit.cc
     log-utils.cc
     log.cc
     mark-compact.cc
@@ -131,6 +132,24 @@
   'armvariant:thumb2': Split("""
     arm/assembler-thumb2.cc
     """),
+  'arch:mips': Split("""
+    mips/assembler-mips.cc
+    mips/builtins-mips.cc
+    mips/codegen-mips.cc
+    mips/constants-mips.cc
+    mips/cpu-mips.cc
+    mips/debug-mips.cc
+    mips/disasm-mips.cc
+    mips/fast-codegen-mips.cc
+    mips/full-codegen-mips.cc
+    mips/frames-mips.cc
+    mips/ic-mips.cc
+    mips/jump-target-mips.cc
+    mips/macro-assembler-mips.cc
+    mips/register-allocator-mips.cc
+    mips/stub-cache-mips.cc
+    mips/virtual-frame-mips.cc
+    """),
   'arch:ia32': Split("""
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
@@ -168,6 +187,7 @@
     x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
+  'simulator:mips': ['mips/simulator-mips.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/src/accessors.cc b/src/accessors.cc
index 5a02928..b05719e 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -647,42 +647,9 @@
 Object* Accessors::ObjectSetPrototype(JSObject* receiver,
                                       Object* value,
                                       void*) {
-  // Before we can set the prototype we need to be sure
-  // prototype cycles are prevented.
-  // It is sufficient to validate that the receiver is not in the new prototype
-  // chain.
-
-  // Silently ignore the change if value is not a JSObject or null.
-  // SpiderMonkey behaves this way.
-  if (!value->IsJSObject() && !value->IsNull()) return value;
-
-  for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
-    if (JSObject::cast(pt) == receiver) {
-      // Cycle detected.
-      HandleScope scope;
-      return Top::Throw(*Factory::NewError("cyclic_proto",
-                                           HandleVector<Object>(NULL, 0)));
-    }
-  }
-
-  // Find the first object in the chain whose prototype object is not
-  // hidden and set the new prototype on that object.
-  JSObject* current = receiver;
-  Object* current_proto = receiver->GetPrototype();
-  while (current_proto->IsJSObject() &&
-         JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-    current = JSObject::cast(current_proto);
-    current_proto = current_proto->GetPrototype();
-  }
-
-  // Set the new prototype of the object.
-  Object* new_map = current->map()->CopyDropTransitions();
-  if (new_map->IsFailure()) return new_map;
-  Map::cast(new_map)->set_prototype(value);
-  current->set_map(Map::cast(new_map));
-
+  const bool skip_hidden_prototypes = true;
   // To be consistent with other Set functions, return the value.
-  return value;
+  return receiver->SetPrototype(value, skip_hidden_prototypes);
 }
 
 
diff --git a/src/api.cc b/src/api.cc
index 322c90f..dbb3d8b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1106,7 +1106,8 @@
 
 Local<Script> Script::New(v8::Handle<String> source,
                           v8::ScriptOrigin* origin,
-                          v8::ScriptData* script_data) {
+                          v8::ScriptData* pre_data,
+                          v8::Handle<String> script_data) {
   ON_BAILOUT("v8::Script::New()", return Local<Script>());
   LOG_API("Script::New");
   ENTER_V8;
@@ -1126,20 +1127,17 @@
     }
   }
   EXCEPTION_PREAMBLE();
-  i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+  i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
   // We assert that the pre-data is sane, even though in release mode we
   // can actually handle pre-data that fails the sanity check.
-  ASSERT(pre_data == NULL || pre_data->SanityCheck());
+  ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
   // If the pre-data isn't sane we simply ignore it
-  if (pre_data != NULL && !pre_data->SanityCheck()) {
-    pre_data = NULL;
+  if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+    pre_data_impl = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
-                                                              name_obj,
-                                                              line_offset,
-                                                              column_offset,
-                                                              NULL,
-                                                              pre_data);
+  i::Handle<i::JSFunction> boilerplate =
+      i::Compiler::Compile(str, name_obj, line_offset, column_offset, NULL,
+                           pre_data_impl, Utils::OpenHandle(*script_data));
   has_pending_exception = boilerplate.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
   return Local<Script>(ToApi<Script>(boilerplate));
@@ -1155,11 +1153,12 @@
 
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::ScriptOrigin* origin,
-                              v8::ScriptData* script_data) {
+                              v8::ScriptData* pre_data,
+                              v8::Handle<String> script_data) {
   ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
   LOG_API("Script::Compile");
   ENTER_V8;
-  Local<Script> generic = New(source, origin, script_data);
+  Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
   i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
@@ -1171,9 +1170,10 @@
 
 
 Local<Script> Script::Compile(v8::Handle<String> source,
-                              v8::Handle<Value> file_name) {
+                              v8::Handle<Value> file_name,
+                              v8::Handle<String> script_data) {
   ScriptOrigin origin(file_name);
-  return Compile(source, &origin);
+  return Compile(source, &origin, NULL, script_data);
 }
 
 
@@ -2032,6 +2032,19 @@
 }
 
 
+bool v8::Object::SetPrototype(Handle<Value> value) {
+  ON_BAILOUT("v8::Object::SetPrototype()", return false);
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
+
 Local<Object> v8::Object::FindInstanceInPrototypeChain(
     v8::Handle<FunctionTemplate> tmpl) {
   ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
@@ -2194,7 +2207,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
-  if (lookup.IsValid()) {
+  if (lookup.IsProperty()) {
     PropertyAttributes attributes;
     i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
                                                       &lookup,
@@ -2213,7 +2226,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
-  if (lookup.IsValid()) {
+  if (lookup.IsProperty()) {
     PropertyAttributes attributes;
     i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
                                                       &lookup,
@@ -2445,6 +2458,99 @@
 }
 
 
+ScriptOrigin Function::GetScriptOrigin() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    v8::ScriptOrigin origin(
+      Utils::ToLocal(i::Handle<i::Object>(script->name())),
+      v8::Integer::New(script->line_offset()->value()),
+      v8::Integer::New(script->column_offset()->value()));
+    return origin;
+  }
+  return v8::ScriptOrigin(Handle<Value>());
+}
+
+
+const int Function::kLineOffsetNotFound = -1;
+
+
+int Function::GetScriptLineNumber() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    return i::GetScriptLineNumber(script, func->shared()->start_position());
+  }
+  return kLineOffsetNotFound;
+}
+
+
+namespace {
+
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+  // Records that the given string's characters were copied to some
+  // external buffer. If this happens often we should honor
+  // externalization requests for the string.
+  static void RecordWrite(i::Handle<i::String> string) {
+    i::Address address = reinterpret_cast<i::Address>(*string);
+    i::Address top = i::Heap::NewSpaceTop();
+    if (IsFreshString(address, top)) {
+      IncrementUseCount(top);
+    }
+  }
+
+  // Estimates freshness and use frequency of the given string based
+  // on how close it is to the new space top and the recorded usage
+  // history.
+  static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
+    i::Address address = reinterpret_cast<i::Address>(*string);
+    i::Address top = i::Heap::NewSpaceTop();
+    return IsFreshString(address, top) && IsUseCountLow(top);
+  }
+
+ private:
+  static inline bool IsFreshString(i::Address string, i::Address top) {
+    return top - kFreshnessLimit <= string && string <= top;
+  }
+
+  static inline bool IsUseCountLow(i::Address top) {
+    if (last_top_ != top) return true;
+    return use_count_ < kUseLimit;
+  }
+
+  static inline void IncrementUseCount(i::Address top) {
+    if (last_top_ != top) {
+      use_count_ = 0;
+      last_top_ = top;
+    }
+    ++use_count_;
+  }
+
+  // How close to the new space top a fresh string has to be.
+  static const int kFreshnessLimit = 1024;
+
+  // The number of uses required to consider a string useful.
+  static const int kUseLimit = 32;
+
+  // Single use counter shared by all fresh strings.
+  static int use_count_;
+
+  // Last new space top when the use count above was valid.
+  static i::Address last_top_;
+};
+
+int StringTracker::use_count_ = 0;
+i::Address StringTracker::last_top_ = NULL;
+
+}  // namespace
+
+
 int String::Length() const {
   if (IsDeadCheck("v8::String::Length()")) return 0;
   return Utils::OpenHandle(this)->length();
@@ -2462,6 +2568,7 @@
   LOG_API("String::WriteUtf8");
   ENTER_V8;
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   write_input_buffer.Reset(0, *str);
   int len = str->length();
   // Encode the first K - 3 bytes directly into the buffer since we
@@ -2505,6 +2612,7 @@
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   // Flatten the string for efficiency.  This applies whether we are
   // using StringInputBuffer or Get(i) to access the characters.
   str->TryFlattenIfNotFlat();
@@ -2531,6 +2639,7 @@
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   int end = length;
   if ( (length == -1) || (length > str->length() - start) )
     end = str->length() - start;
@@ -3098,6 +3207,7 @@
   if (this->IsExternal()) return false;  // Already an external string.
   ENTER_V8;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     i::ExternalStringTable::AddString(*obj);
@@ -3123,6 +3233,7 @@
   if (this->IsExternal()) return false;  // Already an external string.
   ENTER_V8;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     i::ExternalStringTable::AddString(*obj);
@@ -3134,6 +3245,7 @@
 bool v8::String::CanMakeExternal() {
   if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
   if (size < i::ExternalString::kSize)
     return false;
@@ -3357,14 +3469,14 @@
 
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
+  PauseProfilerEx(PROFILER_MODULE_CPU);
 #endif
 }
 
 
 void V8::ResumeProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
+  ResumeProfilerEx(PROFILER_MODULE_CPU);
 #endif
 }
 
@@ -3378,7 +3490,7 @@
 }
 
 
-void V8::ResumeProfilerEx(int flags) {
+void V8::ResumeProfilerEx(int flags, int tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
     // Snapshot mode: resume modules, perform GC, then pause only
@@ -3388,19 +3500,19 @@
     // Reset snapshot flag and CPU module flags.
     flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
     const int current_flags = i::Logger::GetActiveProfilerModules();
-    i::Logger::ResumeProfiler(flags);
+    i::Logger::ResumeProfiler(flags, tag);
     i::Heap::CollectAllGarbage(false);
-    i::Logger::PauseProfiler(~current_flags & flags);
+    i::Logger::PauseProfiler(~current_flags & flags, tag);
   } else {
-    i::Logger::ResumeProfiler(flags);
+    i::Logger::ResumeProfiler(flags, tag);
   }
 #endif
 }
 
 
-void V8::PauseProfilerEx(int flags) {
+void V8::PauseProfilerEx(int flags, int tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::PauseProfiler(flags);
+  i::Logger::PauseProfiler(flags, tag);
 #endif
 }
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 74547be..c79aac6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -51,9 +51,14 @@
   // If the compiler is allowed to use vfp then we can use vfp too in our
   // code generation.
 #if !defined(__arm__)
-  // For the simulator=arm build, always use VFP since the arm simulator has
-  // VFP support.
-  supported_ |= 1u << VFP3;
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+  if (FLAG_enable_vfp3) {
+    supported_ |= 1u << VFP3;
+  }
+  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
+  if (FLAG_enable_armv7) {
+    supported_ |= 1u << ARMv7;
+  }
 #else
   if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@
     supported_ |= 1u << VFP3;
     found_by_runtime_probing_ |= 1u << VFP3;
   }
+
+  if (OS::ArmCpuHasFeature(ARMv7)) {
+    supported_ |= 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << ARMv7;
+  }
 #endif
 }
 
@@ -83,9 +93,9 @@
 Register r5  = {  5 };
 Register r6  = {  6 };
 Register r7  = {  7 };
-Register r8  = {  8 };
+Register r8  = {  8 };  // Used as context register.
 Register r9  = {  9 };
-Register r10 = { 10 };
+Register r10 = { 10 };  // Used as roots register.
 Register fp  = { 11 };
 Register ip  = { 12 };
 Register sp  = { 13 };
@@ -264,9 +274,9 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
-// Instruction encoding bits
+// Instruction encoding bits.
 enum {
   H   = 1 << 5,   // halfword (or byte)
   S6  = 1 << 6,   // signed (or unsigned)
@@ -299,14 +309,14 @@
   B26 = 1 << 26,
   B27 = 1 << 27,
 
-  // Instruction bit masks
+  // Instruction bit masks.
   RdMask     = 15 << 12,  // in str instruction
   CondMask   = 15 << 28,
   CoprocessorMask = 15 << 8,
   OpCodeMask = 15 << 21,  // in data-processing instructions
   Imm24Mask  = (1 << 24) - 1,
   Off12Mask  = (1 << 12) - 1,
-  // Reserved condition
+  // Reserved condition.
   nv = 15 << 28
 };
 
@@ -327,13 +337,13 @@
 // ldr pc, [pc, #XXX]
 const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
 
-// spare_buffer_
+// Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -351,14 +361,14 @@
     own_buffer_ = true;
 
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
     own_buffer_ = false;
   }
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // emit constant pool if necessary
+  // Emit constant pool if necessary.
   CheckConstPool(true, false);
   ASSERT(num_prinfo_ == 0);
 
-  // setup desc
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@
 void Assembler::link_to(Label* L, Label* appendix) {
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       int fixup_pos;
       int link = L->pos();
       do {
@@ -549,7 +559,7 @@
       ASSERT(link == kEndOfChain);
       target_at_put(fixup_pos, appendix->pos());
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -575,12 +585,12 @@
 }
 
 
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
                          Instr* instr) {
-  // imm32 must be unsigned
+  // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
     if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@
       return true;
     }
   }
-  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
   if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
     if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
       *instr ^= 0x2*B21;
@@ -626,7 +636,7 @@
   CheckBuffer();
   ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'
+      // condition code), then replace it with a 'ldr rd, [pc]'.
       RecordRelocInfo(x.rmode_, x.imm32_);
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@
     }
     instr |= I | rotate_imm*B8 | immed_8;
   } else if (!x.rs_.is_valid()) {
-    // immediate shift
+    // Immediate shift.
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
-    // register shift
+    // Register shift.
     ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
   if (rn.is(pc) || x.rm_.is(pc))
-    // block constant pool emission for one instruction after reading pc
+    // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
 }
 
@@ -666,15 +676,15 @@
   ASSERT((instr & ~(CondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_12 = x.offset_;
     if (offset_12 < 0) {
       offset_12 = -offset_12;
       am ^= U;
     }
     if (!is_uint12(offset_12)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@
     ASSERT(offset_12 >= 0);  // no masking needed
     instr |= offset_12;
   } else {
-    // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
     // register offset; the constructors make sure that both shift_imm_
-    // and shift_op_ are initialized
+    // and shift_op_ are initialized.
     ASSERT(!x.rm_.is(pc));
     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   }
@@ -700,15 +710,15 @@
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_8 = x.offset_;
     if (offset_8 < 0) {
       offset_8 = -offset_8;
       am ^= U;
     }
     if (!is_uint8(offset_8)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@
     ASSERT(offset_8 >= 0);  // no masking needed
     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   } else if (x.shift_imm_ != 0) {
-    // scaled register offset not supported, load index first
-    // rn (and rd in a load) should never be ip, or will be trashed
+    // Scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
         static_cast<Condition>(instr & CondMask));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
-    // register offset
+    // Register offset.
     ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
     instr |= x.rm_.code();
   }
@@ -744,7 +754,7 @@
 
 
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // unindexed addressing is not encoded by this function
+  // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
             (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@
   ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
 
-  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
   if ((am & P) == 0)
     am |= W;
 
@@ -782,7 +792,7 @@
   }
 
   // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label
+  // be emitted at the pc offset recorded by the label.
   BlockConstPoolBefore(pc_offset() + kInstrSize);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -804,7 +814,7 @@
 }
 
 
-// Branch instructions
+// Branch instructions.
 void Assembler::b(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
 
   if (cond == al)
-    // dead code is a good location to emit the constant pool
+    // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
 }
 
@@ -849,7 +859,22 @@
 }
 
 
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+                     const Operand& src3, Condition cond) {
+  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
   addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@
 }
 
 
-// Multiply instructions
+// Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@
 }
 
 
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@
 }
 
 
-// Status register access instructions
+// Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // immediate operand cannot be encoded, load it first to register ip
+      // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@
 }
 
 
-// Load/Store instructions
+// Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (dst.is(pc)) {
     WriteRecordedPositions();
@@ -1085,7 +1110,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
     pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@
   if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+     // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@
 }
 
 
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
                     RegList dst,
                     Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable
+  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
 
   addrmod4(cond | B27 | am | L, base, dst);
 
-  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   if (cond == al && (dst & pc.bit()) != 0) {
     // There is a slight chance that the ldm instruction was actually a call,
     // in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@
 }
 
 
-// Semaphore instructions
+// Semaphore instructions.
 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
   ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
   ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@
 }
 
 
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #if !defined(__arm__)
   // The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@
 }
 
 
-// Coprocessor instructions
+// Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
@@ -1307,7 +1333,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1464,7 +1490,7 @@
                      const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1571,14 +1597,14 @@
 }
 
 
-// Pseudo instructions
+// Pseudo instructions.
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
                     Condition cond) {
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     if ((am & P) == 0)  // post indexing
       mov(dst, Operand(x.rn_), s, cond);
     else if ((am & U) == 0)  // negative indexing
@@ -1612,7 +1638,7 @@
 }
 
 
-// Debugging
+// Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
   CheckBuffer();
@@ -1665,7 +1691,7 @@
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -1676,20 +1702,20 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
 
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(reloc_info_writer.pos() + rc_delta,
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
@@ -1697,11 +1723,11 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // none of our relocation types are pc relative pointing outside the code
+  // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
-  // to relocate any emitted relocation entries
+  // to relocate any emitted relocation entries.
 
-  // relocate pending relocation entries
+  // Relocate pending relocation entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1716,16 +1742,16 @@
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes
+    // Adjust code for new modes.
     ASSERT(RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
-    // these modes do not need an entry in the constant pool
+    // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
     prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info
+    // instruction for which we just recorded relocation info.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1752,7 +1778,7 @@
   // blocked for a specific range.
   next_buffer_check_ = pc_offset() + kCheckConstInterval;
 
-  // There is nothing to do if there are no pending relocation info entries
+  // There is nothing to do if there are no pending relocation info entries.
   if (num_prinfo_ == 0) return;
 
   // We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1778,10 +1804,11 @@
   // no_const_pool_before_, which is checked here. Also, recursive calls to
   // CheckConstPool are blocked by no_const_pool_before_.
   if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as possible
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
     next_buffer_check_ = no_const_pool_before_;
 
-    // Something is wrong if emission is forced and blocked at the same time
+    // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
@@ -1795,23 +1822,23 @@
       jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
   while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
 
-  // Block recursive calls to CheckConstPool
+  // Block recursive calls to CheckConstPool.
   BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                        num_prinfo_*kInstrSize);
   // Don't bother to check for the emit calls below.
   next_buffer_check_ = no_const_pool_before_;
 
-  // Emit jump over constant pool if necessary
+  // Emit jump over constant pool if necessary.
   Label after_pool;
   if (require_jump) b(&after_pool);
 
   RecordComment("[ Constant Pool");
 
-  // Put down constant pool marker
-  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
   emit(0x03000000 | num_prinfo_);
 
-  // Emit constant pool entries
+  // Emit constant pool entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1819,8 +1846,8 @@
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
 
-    // Instruction to patch must be a ldr/str [pc, #offset]
-    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 208d583..f6b7a06 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -80,7 +80,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -205,7 +205,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -250,7 +250,7 @@
 };
 
 
-// Condition field in instructions
+// Condition field in instructions.
 enum Condition {
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
@@ -628,6 +628,9 @@
   void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
 
   // Data-processing instructions
+  void ubfx(Register dst, Register src1, const Operand& src2,
+            const Operand& src3, Condition cond = al);
+
   void and_(Register dst, Register src1, const Operand& src2,
             SBit s = LeaveCC, Condition cond = al);
 
diff --git a/src/arm/assembler-thumb2-inl.h b/src/arm/assembler-thumb2-inl.h
index 3808ef0..9e0fc2f 100644
--- a/src/arm/assembler-thumb2-inl.h
+++ b/src/arm/assembler-thumb2-inl.h
@@ -174,20 +174,6 @@
 }
 
 
-Operand::Operand(Object** opp) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(opp);
-  rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(cpp);
-  rmode_ = RelocInfo::NONE;
-}
-
-
 Operand::Operand(Smi* value) {
   rm_ = no_reg;
   imm32_ =  reinterpret_cast<intptr_t>(value);
@@ -229,14 +215,24 @@
 
 
 Address Assembler::target_address_address_at(Address pc) {
-  Instr instr = Memory::int32_at(pc);
-  // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+  Address target_pc = pc;
+  Instr instr = Memory::int32_at(target_pc);
+  // If we have a bx instruction, the instruction before the bx is
+  // what we need to patch.
+  static const int32_t kBxInstMask = 0x0ffffff0;
+  static const int32_t kBxInstPattern = 0x012fff10;
+  if ((instr & kBxInstMask) == kBxInstPattern) {
+    target_pc -= kInstrSize;
+    instr = Memory::int32_at(target_pc);
+  }
+  // Verify that the instruction to patch is a
+  // ldr<cond> <Rd>, [pc +/- offset_12].
   ASSERT((instr & 0x0f7f0000) == 0x051f0000);
   int offset = instr & 0xfff;  // offset_12 is unsigned
   if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
   // Verify that the constant pool comes after the instruction referencing it.
   ASSERT(offset >= -4);
-  return pc + offset + 8;
+  return target_pc + offset + 8;
 }
 
 
diff --git a/src/arm/assembler-thumb2.cc b/src/arm/assembler-thumb2.cc
index 6c2b903..e31c429 100644
--- a/src/arm/assembler-thumb2.cc
+++ b/src/arm/assembler-thumb2.cc
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -51,9 +51,14 @@
   // If the compiler is allowed to use vfp then we can use vfp too in our
   // code generation.
 #if !defined(__arm__)
-  // For the simulator=arm build, always use VFP since the arm simulator has
-  // VFP support.
-  supported_ |= 1u << VFP3;
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+  if (FLAG_enable_vfp3) {
+    supported_ |= 1u << VFP3;
+  }
+  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
+  if (FLAG_enable_armv7) {
+    supported_ |= 1u << ARMv7;
+  }
 #else
   if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@
     supported_ |= 1u << VFP3;
     found_by_runtime_probing_ |= 1u << VFP3;
   }
+
+  if (OS::ArmCpuHasFeature(ARMv7)) {
+    supported_ |= 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << ARMv7;
+  }
 #endif
 }
 
@@ -83,9 +93,9 @@
 Register r5  = {  5 };
 Register r6  = {  6 };
 Register r7  = {  7 };
-Register r8  = {  8 };
+Register r8  = {  8 };  // Used as context register.
 Register r9  = {  9 };
-Register r10 = { 10 };
+Register r10 = { 10 };  // Used as roots register.
 Register fp  = { 11 };
 Register ip  = { 12 };
 Register sp  = { 13 };
@@ -264,9 +274,9 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
-// Instruction encoding bits
+// Instruction encoding bits.
 enum {
   H   = 1 << 5,   // halfword (or byte)
   S6  = 1 << 6,   // signed (or unsigned)
@@ -299,14 +309,14 @@
   B26 = 1 << 26,
   B27 = 1 << 27,
 
-  // Instruction bit masks
+  // Instruction bit masks.
   RdMask     = 15 << 12,  // in str instruction
   CondMask   = 15 << 28,
   CoprocessorMask = 15 << 8,
   OpCodeMask = 15 << 21,  // in data-processing instructions
   Imm24Mask  = (1 << 24) - 1,
   Off12Mask  = (1 << 12) - 1,
-  // Reserved condition
+  // Reserved condition.
   nv = 15 << 28
 };
 
@@ -327,13 +337,13 @@
 // ldr pc, [pc, #XXX]
 const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
 
-// spare_buffer_
+// Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -351,14 +361,14 @@
     own_buffer_ = true;
 
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
     own_buffer_ = false;
   }
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // emit constant pool if necessary
+  // Emit constant pool if necessary.
   CheckConstPool(true, false);
   ASSERT(num_prinfo_ == 0);
 
-  // setup desc
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@
 void Assembler::link_to(Label* L, Label* appendix) {
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       int fixup_pos;
       int link = L->pos();
       do {
@@ -549,7 +559,7 @@
       ASSERT(link == kEndOfChain);
       target_at_put(fixup_pos, appendix->pos());
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -575,12 +585,12 @@
 }
 
 
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
                          Instr* instr) {
-  // imm32 must be unsigned
+  // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
     if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@
       return true;
     }
   }
-  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
   if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
     if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
       *instr ^= 0x2*B21;
@@ -626,7 +636,7 @@
   CheckBuffer();
   ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'
+      // condition code), then replace it with a 'ldr rd, [pc]'.
       RecordRelocInfo(x.rmode_, x.imm32_);
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@
     }
     instr |= I | rotate_imm*B8 | immed_8;
   } else if (!x.rs_.is_valid()) {
-    // immediate shift
+    // Immediate shift.
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
-    // register shift
+    // Register shift.
     ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
   if (rn.is(pc) || x.rm_.is(pc))
-    // block constant pool emission for one instruction after reading pc
+    // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
 }
 
@@ -666,15 +676,15 @@
   ASSERT((instr & ~(CondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_12 = x.offset_;
     if (offset_12 < 0) {
       offset_12 = -offset_12;
       am ^= U;
     }
     if (!is_uint12(offset_12)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@
     ASSERT(offset_12 >= 0);  // no masking needed
     instr |= offset_12;
   } else {
-    // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
     // register offset; the constructors make sure that both shift_imm_
-    // and shift_op_ are initialized
+    // and shift_op_ are initialized.
     ASSERT(!x.rm_.is(pc));
     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   }
@@ -700,15 +710,15 @@
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_8 = x.offset_;
     if (offset_8 < 0) {
       offset_8 = -offset_8;
       am ^= U;
     }
     if (!is_uint8(offset_8)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@
     ASSERT(offset_8 >= 0);  // no masking needed
     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   } else if (x.shift_imm_ != 0) {
-    // scaled register offset not supported, load index first
-    // rn (and rd in a load) should never be ip, or will be trashed
+    // Scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
         static_cast<Condition>(instr & CondMask));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
-    // register offset
+    // Register offset.
     ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
     instr |= x.rm_.code();
   }
@@ -744,7 +754,7 @@
 
 
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // unindexed addressing is not encoded by this function
+  // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
             (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@
   ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
 
-  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
   if ((am & P) == 0)
     am |= W;
 
@@ -782,7 +792,7 @@
   }
 
   // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label
+  // be emitted at the pc offset recorded by the label.
   BlockConstPoolBefore(pc_offset() + kInstrSize);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -804,7 +814,7 @@
 }
 
 
-// Branch instructions
+// Branch instructions.
 void Assembler::b(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
 
   if (cond == al)
-    // dead code is a good location to emit the constant pool
+    // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
 }
 
@@ -849,7 +859,22 @@
 }
 
 
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
+//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+                     const Operand& src3, Condition cond) {
+  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
+
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
   addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@
 }
 
 
-// Multiply instructions
+// Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@
 }
 
 
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@
 }
 
 
-// Status register access instructions
+// Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // immediate operand cannot be encoded, load it first to register ip
+      // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@
 }
 
 
-// Load/Store instructions
+// Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (dst.is(pc)) {
     WriteRecordedPositions();
@@ -1085,7 +1110,7 @@
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
     pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@
   if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+     // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@
 }
 
 
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
                     RegList dst,
                     Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable
+  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
 
   addrmod4(cond | B27 | am | L, base, dst);
 
-  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   if (cond == al && (dst & pc.bit()) != 0) {
     // There is a slight chance that the ldm instruction was actually a call,
     // in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@
 }
 
 
-// Semaphore instructions
+// Semaphore instructions.
 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
   ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
   ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@
 }
 
 
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #if !defined(__arm__)
   // The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@
 }
 
 
-// Coprocessor instructions
+// Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
@@ -1307,7 +1333,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1371,6 +1397,36 @@
 
 
 // Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Ddst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
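+  // Note: the U (add) bit is hard-wired in this encoding and the offset is
+  // truncated to 8 bits of words, so offset should be in [0, 1020].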
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // MEM(Rbase + offset) = Dsrc.
+  // Instruction details available in ARM DDI 0406A, A8-786.
+  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+  // Vsrc(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
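+  // Same encoding constraint as vldr above: offset should be in [0, 1020].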
+  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
@@ -1434,7 +1490,7 @@
                      const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1541,14 +1597,14 @@
 }
 
 
-// Pseudo instructions
+// Pseudo instructions.
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
                     Condition cond) {
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     if ((am & P) == 0)  // post indexing
       mov(dst, Operand(x.rn_), s, cond);
     else if ((am & U) == 0)  // negative indexing
@@ -1582,7 +1638,7 @@
 }
 
 
-// Debugging
+// Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
   CheckBuffer();
@@ -1635,7 +1691,7 @@
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -1646,20 +1702,20 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
 
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(reloc_info_writer.pos() + rc_delta,
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
@@ -1667,11 +1723,11 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // none of our relocation types are pc relative pointing outside the code
+  // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
-  // to relocate any emitted relocation entries
+  // to relocate any emitted relocation entries.
 
-  // relocate pending relocation entries
+  // Relocate pending relocation entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1686,16 +1742,16 @@
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes
+    // Adjust code for new modes.
     ASSERT(RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
-    // these modes do not need an entry in the constant pool
+    // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
     prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info
+    // instruction for which we just recorded relocation info.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1722,7 +1778,7 @@
   // blocked for a specific range.
   next_buffer_check_ = pc_offset() + kCheckConstInterval;
 
-  // There is nothing to do if there are no pending relocation info entries
+  // There is nothing to do if there are no pending relocation info entries.
   if (num_prinfo_ == 0) return;
 
   // We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1748,10 +1804,11 @@
   // no_const_pool_before_, which is checked here. Also, recursive calls to
   // CheckConstPool are blocked by no_const_pool_before_.
   if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as possible
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
     next_buffer_check_ = no_const_pool_before_;
 
-    // Something is wrong if emission is forced and blocked at the same time
+    // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
@@ -1765,23 +1822,23 @@
       jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
   while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
 
-  // Block recursive calls to CheckConstPool
+  // Block recursive calls to CheckConstPool.
   BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                        num_prinfo_*kInstrSize);
   // Don't bother to check for the emit calls below.
   next_buffer_check_ = no_const_pool_before_;
 
-  // Emit jump over constant pool if necessary
+  // Emit jump over constant pool if necessary.
   Label after_pool;
   if (require_jump) b(&after_pool);
 
   RecordComment("[ Constant Pool");
 
-  // Put down constant pool marker
-  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
   emit(0x03000000 | num_prinfo_);
 
-  // Emit constant pool entries
+  // Emit constant pool entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1789,8 +1846,8 @@
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
 
-    // Instruction to patch must be a ldr/str [pc, #offset]
-    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
index 31e9487..869ac46 100644
--- a/src/arm/assembler-thumb2.h
+++ b/src/arm/assembler-thumb2.h
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -80,7 +80,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -205,7 +205,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -250,7 +250,7 @@
 };
 
 
-// Condition field in instructions
+// Condition field in instructions.
 enum Condition {
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
@@ -398,8 +398,6 @@
          RelocInfo::Mode rmode = RelocInfo::NONE));
   INLINE(explicit Operand(const ExternalReference& f));
   INLINE(explicit Operand(const char* s));
-  INLINE(explicit Operand(Object** opp));
-  INLINE(explicit Operand(Context** cpp));
   explicit Operand(Handle<Object> handle);
   INLINE(explicit Operand(Smi* value));
 
@@ -630,6 +628,9 @@
   void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
 
   // Data-processing instructions
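+  // ubfx: unsigned bit field extract; src2 and src3 are presumably the lsb
+  // and width immediates.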
+  void ubfx(Register dst, Register src1, const Operand& src2,
+            const Operand& src3, Condition cond = al);
+
   void and_(Register dst, Register src1, const Operand& src2,
             SBit s = LeaveCC, Condition cond = al);
 
@@ -796,6 +797,14 @@
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.
 
+  void vldr(const DwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+  void vstr(const DwVfpRegister src,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ae7dae3..edb1b0a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -499,7 +499,10 @@
   // r0: number of arguments
   // r1: called object
   __ bind(&non_function_call);
-
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site).  The receiver is
+  // stack element argc.
+  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -904,7 +907,7 @@
 
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
-  // r0: actual number of argument
+  // r0: actual number of arguments
   { Label done;
     __ tst(r0, Operand(r0));
     __ b(ne, &done);
@@ -914,40 +917,31 @@
     __ bind(&done);
   }
 
-  // 2. Get the function to call from the stack.
-  // r0: actual number of argument
-  { Label done, non_function, function;
-    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &non_function);
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(eq, &function);
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // r0: actual number of arguments
+  Label non_function;
+  __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function);
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function);
 
-    // Non-function called: Clear the function to force exception.
-    __ bind(&non_function);
-    __ mov(r1, Operand(0));
-    __ b(&done);
-
-    // Change the context eagerly because it will be used below to get the
-    // right global object.
-    __ bind(&function);
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-    __ bind(&done);
-  }
-
-  // 3. Make sure first argument is an object; convert if necessary.
+  // 3a. Patch the first argument if necessary when calling a function.
   // r0: actual number of arguments
   // r1: function
-  { Label call_to_object, use_global_receiver, patch_receiver, done;
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
     __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
     __ ldr(r2, MemOperand(r2, -kPointerSize));
-
     // r0: actual number of arguments
     // r1: function
     // r2: first argument
     __ tst(r2, Operand(kSmiTagMask));
-    __ b(eq, &call_to_object);
+    __ b(eq, &convert_to_object);
 
     __ LoadRoot(r3, Heap::kNullValueRootIndex);
     __ cmp(r2, r3);
@@ -957,31 +951,28 @@
     __ b(eq, &use_global_receiver);
 
     __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
-    __ b(lt, &call_to_object);
+    __ b(lt, &convert_to_object);
     __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
-    __ b(le, &done);
+    __ b(le, &shift_arguments);
 
-    __ bind(&call_to_object);
-    __ EnterInternalFrame();
-
-    // Store number of arguments and function across the call into the runtime.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
     __ push(r0);
-    __ push(r1);
 
     __ push(r2);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
     __ mov(r2, r0);
 
-    // Restore number of arguments and function.
-    __ pop(r1);
     __ pop(r0);
     __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
     __ LeaveInternalFrame();
-    __ b(&patch_receiver);
+    // Restore the function to r1.
+    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ jmp(&patch_receiver);
 
-    // Use the global receiver object from the called function as the receiver.
+    // Use the global receiver object from the called function as the
+    // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -994,16 +985,30 @@
     __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
     __ str(r2, MemOperand(r3, -kPointerSize));
 
-    __ bind(&done);
+    __ jmp(&shift_arguments);
   }
 
-  // 4. Shift stuff one slot down the stack
-  // r0: actual number of arguments (including call() receiver)
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // r0: actual number of arguments
   // r1: function
+  __ bind(&non_function);
+  __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ str(r1, MemOperand(r2, -kPointerSize));
+  // Clear r1 to indicate a non-function being called.
+  __ mov(r1, Operand(0));
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r0: actual number of arguments
+  // r1: function
+  __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
     __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
-    __ add(r2, r2, Operand(kPointerSize));  // copy receiver too
 
     __ bind(&loop);
     __ ldr(ip, MemOperand(r2, -kPointerSize));
@@ -1011,43 +1016,41 @@
     __ sub(r2, r2, Operand(kPointerSize));
     __ cmp(r2, sp);
     __ b(ne, &loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ sub(r0, r0, Operand(1));
+    __ pop();
   }
 
-  // 5. Adjust the actual number of arguments and remove the top element.
-  // r0: actual number of arguments (including call() receiver)
-  // r1: function
-  __ sub(r0, r0, Operand(1));
-  __ add(sp, sp, Operand(kPointerSize));
-
-  // 6. Get the code for the function or the non-function builtin.
-  //    If number of expected arguments matches, then call. Otherwise restart
-  //    the arguments adaptor stub.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
   // r0: actual number of arguments
   // r1: function
-  { Label invoke;
+  { Label function;
     __ tst(r1, r1);
-    __ b(ne, &invoke);
+    __ b(ne, &function);
     __ mov(r2, Operand(0));  // expected arguments is 0 for CALL_NON_FUNCTION
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
     __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
                          RelocInfo::CODE_TARGET);
-
-    __ bind(&invoke);
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r2,
-           FieldMemOperand(r3,
-                           SharedFunctionInfo::kFormalParameterCountOffset));
-    __ ldr(r3,
-           MemOperand(r3, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
-    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ cmp(r2, r0);  // Check formal and actual parameter counts.
-    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-                         RelocInfo::CODE_TARGET, ne);
-
-    // 7. Jump to the code in r3 without checking arguments.
-    ParameterCount expected(0);
-    __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+    __ bind(&function);
   }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register r3 without checking arguments.
+  // r0: actual number of arguments
+  // r1: function
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ cmp(r2, r0);  // Check formal and actual parameter counts.
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET, ne);
+
+  ParameterCount expected(0);
+  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
 }
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index ea4b165..046d7b9 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -121,14 +121,10 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       cc_reg_(al),
@@ -137,23 +133,21 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 // Calling conventions:
 // fp: caller's frame pointer
 // sp: stack pointer
 // r1: called JS function
 // cp: callee's context
 
-void CodeGenerator::Generate(FunctionLiteral* fun,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(fun);
-
-  ZoneList<Statement*>* body = fun->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = fun->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -174,7 +168,7 @@
 
 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
-        fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
       frame_->SpillAll();
       __ stop("stop-at");
     }
@@ -189,7 +183,7 @@
       frame_->AllocateStackSlots();
 
       VirtualFrame::SpilledScope spilled_scope;
-      int heap_slots = scope_->num_heap_slots();
+      int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         // Allocate local context.
         // Get outer context and create a new context based on it.
@@ -219,7 +213,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -228,12 +221,11 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
-            // No parameters in global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());  // No params in global scope.
             __ ldr(r1, frame_->ParameterAt(i));
             // Loads r2 with context; used below in RecordWrite.
             __ str(r1, SlotOperand(slot, r2));
@@ -249,20 +241,20 @@
       // Store the arguments object.  This must happen after context
       // initialization because the arguments object may be stored in the
       // context.
-      if (scope_->arguments() != NULL) {
+      if (scope()->arguments() != NULL) {
         Comment cmnt(masm_, "[ allocate arguments object");
-        ASSERT(scope_->arguments_shadow() != NULL);
-        Variable* arguments = scope_->arguments()->var();
-        Variable* shadow = scope_->arguments_shadow()->var();
+        ASSERT(scope()->arguments_shadow() != NULL);
+        Variable* arguments = scope()->arguments()->var();
+        Variable* shadow = scope()->arguments_shadow()->var();
         ASSERT(arguments != NULL && arguments->slot() != NULL);
         ASSERT(shadow != NULL && shadow->slot() != NULL);
         ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
         __ ldr(r2, frame_->Function());
         // The receiver is below the arguments, the return address, and the
         // frame pointer on the stack.
-        const int kReceiverDisplacement = 2 + scope_->num_parameters();
+        const int kReceiverDisplacement = 2 + scope()->num_parameters();
         __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
-        __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+        __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
         frame_->Adjust(3);
         __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
         frame_->CallStub(&stub, 3);
@@ -273,10 +265,10 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         __ mov(ip, Operand(Factory::the_hole_value()));
         frame_->EmitPush(ip);
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, r1, cp,
@@ -295,12 +287,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -314,7 +306,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -325,14 +317,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatementsAndSpill(body);
+      VisitStatementsAndSpill(info->function()->body());
     }
   }
 
   // Generate the return sequence if necessary.
   if (has_valid_frame() || function_return_.is_linked()) {
     if (!function_return_.is_linked()) {
-      CodeForReturnPosition(fun);
+      CodeForReturnPosition(info->function());
     }
     // exit
     // r0: result
@@ -355,7 +347,7 @@
 
     // Calculate the exact length of the return sequence and make sure that
     // the constant pool is not emitted inside of the return sequence.
-    int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
     int return_sequence_length = Assembler::kJSReturnSequenceLength;
     if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
       // Additional mov instruction generated.
@@ -395,7 +387,6 @@
   }
 
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 
@@ -2302,8 +2293,7 @@
   Comment cmnt(masm_, "[ DebuggerStatament");
   CodeForStatementPosition(node);
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  DebuggerStatementStub ces;
-  frame_->CallStub(&ces, 0);
+  frame_->DebugBreak();
 #endif
   // Ignore the return value.
   ASSERT(frame_->height() == original_height);
@@ -2341,7 +2331,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) {
     ASSERT(frame_->height() == original_height);
@@ -2728,9 +2718,9 @@
     frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   }
   frame_->EmitPush(r0);  // save the result
-  // r0: created object literal
-
   for (int i = 0; i < node->properties()->length(); i++) {
+    // At the start of each iteration, the top of stack contains
+    // the newly created object literal.
     ObjectLiteral::Property* property = node->properties()->at(i);
     Literal* key = property->key();
     Expression* value = property->value();
@@ -2740,34 +2730,43 @@
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
         // else fall through
-      case ObjectLiteral::Property::COMPUTED:  // fall through
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          LoadAndSpill(value);
+          frame_->EmitPop(r0);
+          __ mov(r2, Operand(key->handle()));
+          __ ldr(r1, frame_->Top());  // Load the receiver.
+          frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+          break;
+        }
+        // else fall through
       case ObjectLiteral::Property::PROTOTYPE: {
+        __ ldr(r0, frame_->Top());
         frame_->EmitPush(r0);  // dup the result
         LoadAndSpill(key);
         LoadAndSpill(value);
         frame_->CallRuntime(Runtime::kSetProperty, 3);
-        // restore r0
-        __ ldr(r0, frame_->Top());
         break;
       }
       case ObjectLiteral::Property::SETTER: {
+        __ ldr(r0, frame_->Top());
         frame_->EmitPush(r0);
         LoadAndSpill(key);
         __ mov(r0, Operand(Smi::FromInt(1)));
         frame_->EmitPush(r0);
         LoadAndSpill(value);
         frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        __ ldr(r0, frame_->Top());
         break;
       }
       case ObjectLiteral::Property::GETTER: {
+        __ ldr(r0, frame_->Top());
         frame_->EmitPush(r0);
         LoadAndSpill(key);
         __ mov(r0, Operand(Smi::FromInt(0)));
         frame_->EmitPush(r0);
         LoadAndSpill(value);
         frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        __ ldr(r0, frame_->Top());
         break;
       }
     }
@@ -2785,17 +2784,19 @@
 
   // Load the function of this activation.
   __ ldr(r2, frame_->Function());
-  // Literals array.
+  // Load the literals array of the function.
   __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
-  // Literal index.
   __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
-  // Constant elements.
   __ mov(r0, Operand(node->constant_elements()));
   frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+  int length = node->values()->length();
   if (node->depth() > 1) {
     frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
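+    // The stub unrolls the element copy, so longer literals are left to the
+    // runtime.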
     frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    frame_->CallStub(&stub, 3);
   }
   frame_->EmitPush(r0);  // save the result
   // r0: created object literal
@@ -3022,11 +3023,6 @@
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
-
-    // Push the name of the function and the receiver onto the stack.
-    __ mov(r0, Operand(var->name()));
-    frame_->EmitPush(r0);
-
     // Pass the global object as the receiver and let the IC stub
     // patch the stack to use the global proxy as 'this' in the
     // invoked function.
@@ -3038,15 +3034,14 @@
       LoadAndSpill(args->at(i));
     }
 
-    // Setup the receiver register and call the IC initialization code.
+    // Setup the name register and call the IC initialization code.
+    __ mov(r2, Operand(var->name()));
     InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
     CodeForSourcePosition(node->position());
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
                            arg_count + 1);
     __ ldr(cp, frame_->Context());
-    // Remove the function from the stack.
-    frame_->Drop();
     frame_->EmitPush(r0);
 
   } else if (var != NULL && var->slot() != NULL &&
@@ -3079,28 +3074,21 @@
       // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
       // ------------------------------------------------------------------
 
-      // Push the name of the function and the receiver onto the stack.
-      __ mov(r0, Operand(literal->handle()));
-      frame_->EmitPush(r0);
-      LoadAndSpill(property->obj());
-
+      LoadAndSpill(property->obj());  // Receiver.
       // Load the arguments.
       int arg_count = args->length();
       for (int i = 0; i < arg_count; i++) {
         LoadAndSpill(args->at(i));
       }
 
-      // Set the receiver register and call the IC initialization code.
+      // Set the name register and call the IC initialization code.
+      __ mov(r2, Operand(literal->handle()));
       InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
       Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
       CodeForSourcePosition(node->position());
       frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
       __ ldr(cp, frame_->Context());
-
-      // Remove the function from the stack.
-      frame_->Drop();
-
-      frame_->EmitPush(r0);  // push after get rid of function from the stack
+      frame_->EmitPush(r0);
 
     } else {
       // -------------------------------------------
@@ -3519,7 +3507,7 @@
 
   // Seed the result with the formal parameters count, which will be used
   // in case no arguments adaptor frame is found below the current frame.
-  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
@@ -3536,7 +3524,7 @@
   // Load the key into r1 and the formal parameters count into r0.
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r1);
-  __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
@@ -3560,7 +3548,8 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  frame_->CallRuntime(Runtime::kStringAdd, 2);
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  frame_->CallStub(&stub, 2);
   frame_->EmitPush(r0);
 }
 
@@ -3572,7 +3561,8 @@
   Load(args->at(1));
   Load(args->at(2));
 
-  frame_->CallRuntime(Runtime::kSubString, 3);
+  SubStringStub stub;
+  frame_->CallStub(&stub, 3);
   frame_->EmitPush(r0);
 }
 
@@ -3602,6 +3592,17 @@
 }
 
 
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and jump to the runtime.
+  Load(args->at(0));
+
+  frame_->CallRuntime(Runtime::kNumberToString, 1);
+  frame_->EmitPush(r0);
+}
+
+
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
@@ -3633,8 +3634,6 @@
 
   if (function == NULL) {
     // Prepare stack for calling JS runtime function.
-    __ mov(r0, Operand(node->name()));
-    frame_->EmitPush(r0);
     // Push the builtins object found in the current global object.
     __ ldr(r1, GlobalObject());
     __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
@@ -3649,11 +3648,11 @@
 
   if (function == NULL) {
     // Call the JS runtime function.
+    __ mov(r2, Operand(node->name()));
     InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
     __ ldr(cp, frame_->Context());
-    frame_->Drop();
     frame_->EmitPush(r0);
   } else {
     // Call the C runtime function.
@@ -4396,11 +4395,11 @@
       Handle<String> name(GetName());
 
       frame->EmitPop(r0);
-      // Setup the name register.
+      frame->EmitPop(r1);
       __ mov(r2, Operand(name));
       frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
       frame->EmitPush(r0);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
@@ -4412,7 +4411,6 @@
 
       // Call IC code.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      // TODO(1222589): Make the IC grab the values from the stack.
       frame->EmitPop(r0);  // value
       frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
       frame->EmitPush(r0);
@@ -4487,7 +4485,7 @@
                         TAG_OBJECT);
 
   // Load the function from the stack.
-  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+  __ ldr(r3, MemOperand(sp, 0));
 
   // Setup the object header.
   __ LoadRoot(r2, Heap::kContextMapRootIndex);
@@ -4523,6 +4521,69 @@
 }
 
 
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [sp]: constant elements.
+  // [sp + kPointerSize]: literal index.
+  // [sp + (2 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
+  // Load boilerplate object into r3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r3, ip);
+  __ b(eq, &slow_case);
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size / kPointerSize,
+                        r0,
+                        r1,
+                        r2,
+                        &slow_case,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
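+  // The elements field is copied verbatim only for an empty array; for a
+  // non-empty array it is fixed up below.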
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ ldr(r1, FieldMemOperand(r3, i));
+      __ str(r1, FieldMemOperand(r0, i));
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and setup the
+    // elements pointer in the resulting object.
+    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ add(r2, r0, Operand(JSArray::kSize));
+    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    for (int i = 0; i < elements_size; i += kPointerSize) {
+      __ ldr(r1, FieldMemOperand(r3, i));
+      __ str(r1, FieldMemOperand(r2, i));
+    }
+  }
+
+  // Return and remove the on-stack parameters.
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+  __ TailCallRuntime(runtime, 3, 1);
+}
+
+
 // Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
 // instruction.  On pre-ARM5 hardware this routine gives the wrong answer for 0
 // (31 instead of 32).
@@ -5340,7 +5401,7 @@
     // r1 : first argument
     // r0 : second argument
     // sp[0] : second argument
-    // sp[1] : first argument
+    // sp[4] : first argument
 
     Label not_strings, not_string1, string1;
     __ tst(r1, Operand(kSmiTagMask));
@@ -5355,7 +5416,8 @@
     __ b(ge, &string1);
 
     // First and second argument are strings.
-    __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+    StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+    __ TailCallStub(&stub);
 
     // Only first argument is a string.
     __ bind(&string1);
@@ -5369,7 +5431,6 @@
     __ b(ge, &not_strings);
 
     // Only second argument is a string.
-    __ b(&not_strings);
     __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
 
     __ bind(&not_strings);
@@ -5851,6 +5912,7 @@
 }
 
 
+
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
@@ -6043,9 +6105,7 @@
         case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
         case Token::SAR:
           // Remove tags from right operand.
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r0, Operand(r1, ASR, r2));
           // Smi tag result.
           __ bic(r0, r0, Operand(kSmiTagMask));
@@ -6054,9 +6114,7 @@
           // Remove tags from operands.  We can't do this on a 31 bit number
           // because then the 0s get shifted into bit 30 instead of bit 31.
           __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r3, Operand(r3, LSR, r2));
           // Unsigned shift is not allowed to produce a negative number, so
           // check the sign bit and the sign bit after Smi tagging.
@@ -6068,9 +6126,7 @@
         case Token::SHL:
           // Remove tags from operands.
           __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-          // Use only the 5 least significant bits of the shift count.
-          __ and_(r2, r2, Operand(0x1f));
+          __ GetLeastBitsFromSmi(r2, r0, 5);
           __ mov(r3, Operand(r3, LSL, r2));
           // Check that the signed result fits in a Smi.
           __ add(r2, r3, Operand(0x40000000), SetCC);
@@ -6478,8 +6534,7 @@
   // r1: function
   // r2: receiver
   // r3: argc
-  __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
-  __ ldr(r4, MemOperand(r4));  // argv
+  __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv
 
   // Push a frame with special values setup to mark it as an entry frame.
   // r0: code entry
@@ -6597,7 +6652,7 @@
   __ b(gt, &slow);
 
   // Get the prototype of the function (r4 is result, r2 is scratch).
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ ldr(r1, MemOperand(sp, 0));
   __ TryGetFunctionPrototype(r1, r4, r2, &slow);
 
   // Check that the function prototype is a JS object.
@@ -6712,20 +6767,102 @@
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+
   // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
+  Label adaptor_frame, try_allocate, runtime;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
   __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &runtime);
+  __ b(eq, &adaptor_frame);
+
+  // Get the length from the frame.
+  __ ldr(r1, MemOperand(sp, 0));
+  __ b(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ str(r0, MemOperand(sp, 0 * kPointerSize));
-  __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ bind(&adaptor_frame);
+  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ str(r1, MemOperand(sp, 0));
+  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
   __ str(r3, MemOperand(sp, 1 * kPointerSize));
 
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array (in words, not
+  // bytes because AllocateInNewSpace expects words).
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ cmp(r1, Operand(0));
+  __ b(eq, &add_arguments_object);
+  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ AllocateInNewSpace(r1, r0, r2, r3, &runtime, TAG_OBJECT);
+
+  // Get the arguments boilerplate from the current (global) context.
+  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+  __ ldr(r4, MemOperand(r4, offset));
+
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ ldr(r3, FieldMemOperand(r4, i));
+    __ str(r3, FieldMemOperand(r0, i));
+  }
+
+  // Setup the callee in-object property.
+  ASSERT(Heap::arguments_callee_index == 0);
+  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+  __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  ASSERT(Heap::arguments_length_index == 1);
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
+
+  // If there are no actual arguments, we're done.
+  Label done;
+  __ cmp(r1, Operand(0));
+  __ b(eq, &done);
+
+  // Get the parameters pointer from the stack and untag the length.
+  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
+  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+
+  // Setup the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
+  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Setup r4 to point to the first array slot.
+  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&loop);
+  // Pre-decrement r2 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+  // Post-increment r4 with kPointerSize on each iteration.
+  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
+  __ sub(r1, r1, Operand(1));
+  __ cmp(r1, Operand(0));
+  __ b(ne, &loop);
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
   __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
@@ -6779,6 +6916,9 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
   __ mov(r0, Operand(argc_));  // Setup the number of arguments.
   __ mov(r2, Operand(0));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
@@ -6837,6 +6977,340 @@
 }
 
 
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+                                            Register dest,
+                                            Register src,
+                                            Register count,
+                                            Register scratch,
+                                            bool ascii) {
+  Label loop;
+  Label done;
+  // This loop just copies one character at a time, as it is only used for very
+  // short strings.
+  if (!ascii) {
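+    // For two-byte strings, double the character count to get the byte count;
+    // SetCC provides the zero check below.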
+    __ add(count, count, Operand(count), SetCC);
+  } else {
+    __ cmp(count, Operand(0));
+  }
+  __ b(eq, &done);
+
+  __ bind(&loop);
+  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+  // Perform sub between load and dependent store to get the load time to
+  // complete.
+  __ sub(count, count, Operand(1), SetCC);
+  __ strb(scratch, MemOperand(dest, 1, PostIndex));
+  // Fall through on the last iteration.
+  __ b(gt, &loop);
+
+  __ bind(&done);
+}
+
+
+enum CopyCharactersFlags {
+  COPY_ASCII = 1,
+  DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
+                                                Register dest,
+                                                Register src,
+                                                Register count,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                Register scratch4,
+                                                Register scratch5,
+                                                int flags) {
+  bool ascii = (flags & COPY_ASCII) != 0;
+  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+  if (dest_always_aligned && FLAG_debug_code) {
+    // Check that destination is actually word aligned if the flag says
+    // that it is.
+    __ tst(dest, Operand(kPointerAlignmentMask));
+    __ Check(eq, "Destination of copy not aligned.");
+  }
+
+  const int kReadAlignment = 4;
+  const int kReadAlignmentMask = kReadAlignment - 1;
+  // Ensure that reading an entire aligned word containing the last character
+  // of a string will not read outside the allocated area (because we pad up
+  // to kObjectAlignment).
+  ASSERT(kObjectAlignment >= kReadAlignment);
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+  if (!ascii) {
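+    // As above, convert the character count to a byte count for two-byte
+    // strings.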
+    __ add(count, count, Operand(count), SetCC);
+  } else {
+    __ cmp(count, Operand(0));
+  }
+  __ b(eq, &done);
+
+  // Assume that you cannot read (or write) unaligned.
+  Label byte_loop;
+  // Must copy at least eight bytes, otherwise just do it one byte at a time.
+  __ cmp(count, Operand(8));
+  __ add(count, dest, Operand(count));
+  Register limit = count;  // Copy until dest equals this.
+  __ b(lt, &byte_loop);
+
+  if (!dest_always_aligned) {
+    // Align dest by byte copying. Copies between zero and three bytes.
+    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
+    Label dest_aligned;
+    __ b(eq, &dest_aligned);
+    __ cmp(scratch4, Operand(2));
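+    // scratch4 holds dest & 3 (1..3 here); copy 4 - scratch4 bytes: one
+    // always, a second if scratch4 <= 2 (le), a third if scratch4 == 1 (lt).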
+    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
+    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
+    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
+    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
+    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
+    __ bind(&dest_aligned);
+  }
+
+  Label simple_loop;
+
+  __ sub(scratch4, dest, Operand(src));
+  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
+  __ b(eq, &simple_loop);
+  // Shift register is number of bits in a source word that
+  // must be combined with bits in the next source word in order
+  // to create a destination word.
+
+  // Complex loop for src/dst that are not aligned the same way.
+  {
+    Label loop;
+    __ mov(scratch4, Operand(scratch4, LSL, 3));
+    Register left_shift = scratch4;
+    __ and_(src, src, Operand(~3));  // Round down to load previous word.
+    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+    // Store the "shift" most significant bits of scratch in the least
+    // significant bits (i.e., shift down by (32-shift)).
+    __ rsb(scratch2, left_shift, Operand(32));
+    Register right_shift = scratch2;
+    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
+
+    __ bind(&loop);
+    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
+    __ sub(scratch5, limit, Operand(dest));
+    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
+    __ str(scratch1, MemOperand(dest, 4, PostIndex));
+    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
+    // Loop if four or more bytes left to copy.
+    // Compare to eight, because we did the subtract before increasing dst.
+    __ sub(scratch5, scratch5, Operand(8), SetCC);
+    __ b(ge, &loop);
+  }
+  // There is now between zero and three bytes left to copy (negative that
+  // number is in scratch5), and between one and three bytes already read into
+  // scratch1 (eight times that number in scratch4). We may have read past
+  // the end of the string, but because objects are aligned, we have not read
+  // past the end of the object.
+  // Find the minimum of remaining characters to move and preloaded characters
+  // and write those as bytes.
+  __ add(scratch5, scratch5, Operand(4), SetCC);
+  __ b(eq, &done);
+  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+    // Move the minimum of bytes read and bytes left to copy to scratch5.
+    __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
+    // Between one and three (value in scratch5) characters already read into
+    // scratch1, ready to write.
+  __ cmp(scratch5, Operand(2));
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
+  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
+  // Copy any remaining bytes.
+  __ b(&byte_loop);
+
+  // Simple loop.
+  // Copy words from src to dst, until less than four bytes left.
+  // Both src and dest are word aligned.
+  __ bind(&simple_loop);
+  {
+    Label loop;
+    __ bind(&loop);
+    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+    __ sub(scratch3, limit, Operand(dest));
+    __ str(scratch1, MemOperand(dest, 4, PostIndex));
+    // Compare to 8, not 4, because we do the subtraction before increasing
+    // dest.
+    __ cmp(scratch3, Operand(8));
+    __ b(ge, &loop);
+  }
+
+  // Copy bytes from src to dst until dst hits limit.
+  __ bind(&byte_loop);
+  __ cmp(dest, Operand(limit));
+  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+  __ b(ge, &done);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ b(&byte_loop);
+
+  __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  lr: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+  // Check bounds and smi-ness.
+  __ ldr(r7, MemOperand(sp, kToOffset));
+  __ ldr(r6, MemOperand(sp, kFromOffset));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  // I.e., an arithmetic shift right by one removes the smi tag.
+  __ mov(r2, Operand(r7, ASR, 1), SetCC);
+  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
+  // If either r7 or r6 had the smi tag bit set, then carry is set now.
+  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
+  __ b(mi, &runtime);  // From is negative.
+
+  __ sub(r2, r2, Operand(r3), SetCC);
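+  // r2 now holds the substring length (to - from).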
+  __ b(mi, &runtime);  // Fail if from > to.
+  // Handle sub-strings of length 2 and less in the runtime system.
+  __ cmp(r2, Operand(2));
+  __ b(le, &runtime);
+
+  // r2: length
+  // r6: from (smi)
+  // r7: to (smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ ldr(r5, MemOperand(sp, kStringOffset));
+  ASSERT_EQ(0, kSmiTag);
+  __ tst(r5, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  Condition is_string = masm->IsObjectStringType(r5, r1);
+  __ b(NegateCondition(is_string), &runtime);
+
+  // r1: instance type
+  // r2: length
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  Label seq_string;
+  __ and_(r4, r1, Operand(kStringRepresentationMask));
+  ASSERT(kSeqStringTag < kConsStringTag);
+  ASSERT(kExternalStringTag > kConsStringTag);
+  __ cmp(r4, Operand(kConsStringTag));
+  __ b(gt, &runtime);  // External strings go to runtime.
+  __ b(lt, &seq_string);  // Sequential strings are handled directly.
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much.)
+  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ tst(r1, Operand(kStringRepresentationMask));
+  ASSERT_EQ(0, kSeqStringTag);
+  __ b(ne, &runtime);  // Cons and External strings go to runtime.
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // r1: instance type.
+  // r2: length
+  // r5: string
+  // r6: from (smi)
+  // r7: to (smi)
+  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+  __ cmp(r4, Operand(r7, ASR, 1));
+  __ b(lt, &runtime);  // Fail if to > length.
+
+  // r1: instance type.
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Check for flat ascii string.
+  Label non_ascii_flat;
+  __ tst(r1, Operand(kStringEncodingMask));
+  ASSERT_EQ(0, kTwoByteStringTag);
+  __ b(eq, &non_ascii_flat);
+
+  // Allocate the result.
+  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+  // r0: result string.
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Locate first character of result.
+  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r6, ASR, 1));
+
+  // r0: result string.
+  // r1: first character of result string.
+  // r2: result string length.
+  // r5: first character of sub string to copy.
+  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                             COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_flat);
+  // r2: result string length.
+  // r5: string.
+  // r6: from offset (smi)
+  // Check for flat two byte string.
+
+  // Allocate the result.
+  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+  // r0: result string.
+  // r2: result string length.
+  // r5: string.
+  // Locate first character of result.
+  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is already multiplied by two, which matches the
+  // size of a two-byte character.
+  __ add(r5, r5, Operand(r6));
+
+  // r0: result string.
+  // r1: first character of result.
+  // r2: result length.
+  // r5: first character of string to copy.
+  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                             DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
 
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -6898,12 +7372,10 @@
   Label runtime;
 
   // Stack frame on entry.
-  //  sp[0]: return address
-  //  sp[4]: right string
-  //  sp[8]: left string
-
-  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));  // left
-  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));  // right
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
 
   Label not_same;
   __ cmp(r0, r1);
@@ -6932,6 +7404,220 @@
 }
 
 
+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+  // Stack on entry:
+  // sp[0]: second argument.
+  // sp[4]: first argument.
+
+  // Load the two arguments.
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    ASSERT_EQ(0, kSmiTag);
+    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+    // Load instance types.
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+    ASSERT_EQ(0, kStringTag);
+    // If either is not a string, go to runtime.
+    __ tst(r4, Operand(kIsNotStringMask));
+    __ tst(r5, Operand(kIsNotStringMask), eq);
+    __ b(ne, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // r0: first string
+  // r1: second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings are empty. In that case return the other.
+    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+    __ cmp(r2, Operand(0));  // Test if first string is empty.
+    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
+    __ cmp(r3, Operand(0), ne);  // Else test if second string is empty.
+    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
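+    // Conditional execution avoids branching here: if the first string was
+    // empty the second cmp is skipped, the flags stay eq, and we fall
+    // through to return r0 (already holding the second string).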
+
+    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+    __ add(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  // Both strings are non-empty.
+  // r0: first string
+  // r1: second string
+  // r2: length of first string
+  // r3: length of second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  // Look at the length of the result of adding the two strings.
+  Label string_add_flat_result;
+  // Adding two lengths can't overflow.
+  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+  __ add(r6, r2, Operand(r3));
+  // Use the runtime system when adding two one-character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+  __ cmp(r6, Operand(2));
+  __ b(eq, &string_add_runtime);
+  // Check if resulting string will be flat.
+  __ cmp(r6, Operand(String::kMinNonFlatLength));
+  __ b(lt, &string_add_flat_result);
+  // Handle exceptionally long strings in the runtime system.
+  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
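+  // (A power of two fits in ARM's 8-bit-rotated immediate encoding, which is
+  // why the ASSERT above requires IsPowerOf2(kMaxLength + 1).)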
+  __ cmp(r6, Operand(String::kMaxLength + 1));
+  __ b(hs, &string_add_runtime);
+
+  // If result is not supposed to be flat, allocate a cons string object.
+  // If both strings are ascii the result is an ascii cons string.
+  if (!string_check_) {
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  }
+  Label non_ascii, allocated;
+  ASSERT_EQ(0, kTwoByteStringTag);
+  __ tst(r4, Operand(kStringEncodingMask));
+  __ tst(r5, Operand(kStringEncodingMask), ne);
+  __ b(eq, &non_ascii);
+
+  // Allocate an ASCII cons string.
+  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii);
+  // Allocate a two byte cons string.
+  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+  __ jmp(&allocated);
+
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
+  // r0: first string
+  // r1: second string
+  // r2: length of first string
+  // r3: length of second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  // r6: sum of lengths.
+  __ bind(&string_add_flat_result);
+  if (!string_check_) {
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  }
+  // Check that both strings are sequential.
+  ASSERT_EQ(0, kSeqStringTag);
+  __ tst(r4, Operand(kStringRepresentationMask));
+  __ tst(r5, Operand(kStringRepresentationMask), eq);
+  __ b(ne, &string_add_runtime);
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ eor(r7, r4, Operand(r5));
+  __ tst(r7, Operand(kStringEncodingMask));
+  __ b(ne, &string_add_runtime);
+  // And see if it's ASCII or two-byte.
+  __ tst(r4, Operand(kStringEncodingMask));
+  __ b(eq, &non_ascii_string_add_flat_result);
+
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r0: first character of first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: first character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+  // Load second argument and locate first character.
+  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r1: first character of second string.
+  // r3: length of second string.
+  // r6: next character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_string_add_flat_result);
+  // Both strings are sequential two byte strings.
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of lengths of strings.
+  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r7: result string.
+
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r0: first character of first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: first character of result.
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+  // Locate first character of second argument.
+  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r1: first character of second string.
+  // r3: length of second string.
+  // r6: next character of result (after copy of first string).
+  // r7: result string.
+  GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+  __ mov(r0, Operand(r7));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 0384485..10e28f4 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -161,19 +161,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -189,7 +185,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -212,16 +208,15 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
 
-  bool is_eval() { return is_eval_; }
-
   // State
   bool has_cc() const  { return cc_reg_ != al; }
   JumpTarget* true_target() const  { return state_->true_target(); }
@@ -249,7 +244,7 @@
   inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // The following are used by class Reference.
   void LoadReference(Reference* ref);
@@ -403,6 +398,9 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  // Fast support for number to string.
+  void GenerateNumberToString(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -425,16 +423,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-
-  Handle<Script> script_;
   List<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   Condition cc_reg_;
@@ -538,6 +534,74 @@
 };
 
 
+class StringStubBase: public CodeStub {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register dest,
+                              Register src,
+                              Register count,
+                              Register scratch,
+                              bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                  Register dest,
+                                  Register src,
+                                  Register count,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register scratch3,
+                                  Register scratch4,
+                                  Register scratch5,
+                                  int flags);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
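+// For example, a code generator that has already proved both operands are
+// strings can pass NO_STRING_CHECK_IN_STUB to skip the checks; with
+// NO_STRING_ADD_FLAGS the stub verifies both arguments itself.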
+
+
+class StringAddStub: public StringStubBase {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
+class SubStringStub: public StringStubBase {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 6eb5239..e6b61b4 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -128,7 +128,7 @@
   //  -- lr    : return address
   //  -- [sp]  : receiver
   // -----------------------------------
-  // Registers r0 and r2 contain objects that needs to be pushed on the
+  // Registers r0 and r2 contain objects that need to be pushed on the
   // expression stack of the fake JS frame.
   Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
 }
@@ -137,14 +137,14 @@
 void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC store (from ic-arm.cc).
   // ----------- S t a t e -------------
-  //  -- r0    : receiver
+  //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
-  // Registers r0 and r2 contain objects that needs to be pushed on the
+  // Registers r0, r1, and r2 contain objects that need to be pushed on the
   // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
 }
 
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 5b31455..127c160 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -429,12 +429,22 @@
       return 3;
     }
     case 'o': {
-      if (format[3] == '1') {
+      if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
         ASSERT(STRING_STARTS_WITH(format, "off12"));
         out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                              "%d", instr->Offset12Field());
         return 5;
+      } else if ((format[3] == '1') && (format[4] == '6')) {
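+        // 'off16to20: bits 16..20 of the instruction; printed with +1 since
+        // bit-field instructions encode width-1 there.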
+        ASSERT(STRING_STARTS_WITH(format, "off16to20"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->Bits(20, 16) + 1);
+        return 9;
+      } else if (format[3] == '7') {
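+        // 'off7to11: the shift-amount field (bits 7..11), used as the lsb
+        // position of a bit field.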
+        ASSERT(STRING_STARTS_WITH(format, "off7to11"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                            "%d", instr->ShiftAmountField());
+        return 8;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
@@ -795,7 +805,18 @@
       break;
     }
     case 3: {
-      Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
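+        // ubfx encodes the field as lsb (bits 11..7) and width-1 (bits
+        // 20..16); the extracted field must fit in 32 bits, so msbit <= 31.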
+        if (msbit <= 31) {
+          Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+        } else {
+          UNREACHABLE();
+        }
+      } else {
+        Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      }
       break;
     }
     default: {
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 1aeea7a..a07b0d2 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -35,78 +35,142 @@
 
 #define __ ACCESS_MASM(masm())
 
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return r0; }
+Register FastCodeGenerator::accumulator1() { return r1; }
+Register FastCodeGenerator::scratch0() { return r3; }
+Register FastCodeGenerator::scratch1() { return r4; }
+Register FastCodeGenerator::receiver_reg() { return r2; }
+Register FastCodeGenerator::context_reg() { return cp; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
   // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
-  __ ldr(reg, MemOperand(sp, index * kPointerSize));
+  int index = 2 + scope()->num_parameters();
+  __ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
 }
 
 
-void FastCodeGenerator::EmitReceiverMapCheck() {
-  Comment cmnt(masm(), ";; MapCheck(this)");
-  if (FLAG_print_ir) {
-    PrintF("MapCheck(this)\n");
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(!destination().is(no_reg));
+  ASSERT(cell->IsJSGlobalPropertyCell());
+
+  __ mov(destination(), Operand(cell));
+  __ ldr(destination(),
+         FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ mov(ip, Operand(Factory::the_hole_value()));
+    __ cmp(destination(), ip);
+    __ Check(ne, "DontDelete cells can't contain the hole");
   }
 
-  EmitLoadReceiver(r1);
-  __ BranchOnSmi(r1, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
-  Handle<Map> map(object->map());
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ mov(ip, Operand(map));
-  __ cmp(r3, ip);
-  __ b(ne, bailout());
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are cp (context) and possibly r1 (this).  Both are also saved
-  // in the stack and cp is preserved by the call.
-  __ ldr(ip, CodeGenerator::GlobalObject());
-  __ push(ip);
-  __ mov(r2, Operand(name));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(r1);
-  }
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
+  // We will emit the write barrier unless the stored value is statically
+  // known to be a smi.
+  bool needs_write_barrier = !is_smi(accumulator0());
+
   // Negative offsets are inobject properties.
   if (offset < 0) {
     offset += map->instance_size();
-    __ mov(r2, r1);  // Copy receiver for write barrier.
+    __ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
+    if (needs_write_barrier) {
+      // Preserve receiver from write barrier.
+      __ mov(scratch0(), receiver_reg());
+    }
   } else {
     offset += FixedArray::kHeaderSize;
-    __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+    __ ldr(scratch0(),
+           FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ str(accumulator0(), FieldMemOperand(scratch0(), offset));
   }
-  // Perform the store.
-  __ str(r0, FieldMemOperand(r2, offset));
-  __ mov(r3, Operand(offset));
-  __ RecordWrite(r2, r3, ip);
+
+  if (needs_write_barrier) {
+    __ mov(scratch1(), Operand(offset));
+    __ RecordWrite(scratch0(), scratch1(), ip);
+  }
+
+  if (destination().is(accumulator1())) {
+    __ mov(accumulator1(), accumulator0());
+    if (is_smi(accumulator0())) {
+      set_as_smi(accumulator1());
+    } else {
+      clear_as_smi(accumulator1());
+    }
+  }
 }
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  ASSERT(!destination().is(no_reg));
+  LookupResult lookup;
+  info()->receiver()->Lookup(*name, &lookup);
+
+  ASSERT(lookup.holder() == *info()->receiver());
+  ASSERT(lookup.type() == FIELD);
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+  int index = lookup.GetFieldIndex() - map->inobject_properties();
+  int offset = index * kPointerSize;
+
+  // Perform the load.  Negative offsets are inobject properties.
+  if (offset < 0) {
+    offset += map->instance_size();
+    __ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
+  } else {
+    offset += FixedArray::kHeaderSize;
+    __ ldr(scratch0(),
+           FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ ldr(destination(), FieldMemOperand(scratch0(), offset));
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+    // If both operands are known to be a smi then there is no need to check
+    // the operands or result.  There is no need to perform the operation in
+    // an effect context.
+    if (!destination().is(no_reg)) {
+      __ orr(destination(), accumulator1(), Operand(accumulator0()));
+    }
+  } else if (destination().is(no_reg)) {
+    // Result is not needed but do not clobber the operands in case of
+    // bailout.
+    __ orr(scratch0(), accumulator1(), Operand(accumulator0()));
+    __ BranchOnNotSmi(scratch0(), bailout());
+  } else {
+    // Preserve the destination operand in a scratch register in case of
+    // bailout.
+    __ mov(scratch0(), destination());
+    __ orr(destination(), accumulator1(), Operand(accumulator0()));
+    __ BranchOnNotSmi(destination(), bailout());
+  }
+
+  // If we didn't bail out, the result (in fact, both inputs too) is known to
+  // be a smi.
+  set_as_smi(accumulator0());
+  set_as_smi(accumulator1());
+}
+
+
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -115,18 +179,42 @@
   // Note that we keep a live register reference to cp (context) at
   // this point.
 
-  // Receiver (this) is allocated to r1 if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  // Receiver (this) is allocated to a fixed register.
+  if (info()->has_this_properties()) {
+    Comment cmnt(masm(), ";; MapCheck(this)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(this)\n");
+    }
+    ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+    Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+    Handle<Map> map(object->map());
+    EmitLoadReceiver();
+    __ CheckMap(receiver_reg(), scratch0(), map, bailout(), false);
+  }
 
-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object is the
+  // same as at lazy-compilation time.
+  if (info()->has_globals()) {
+    Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(GLOBAL)\n");
+    }
+    ASSERT(info()->has_global_object());
+    Handle<Map> map(info()->global_object()->map());
+    __ ldr(scratch0(), CodeGenerator::GlobalObject());
+    __ CheckMap(scratch0(), scratch1(), map, bailout(), true);
+  }
+
+  VisitStatements(function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
+  if (FLAG_print_ir) {
+    PrintF("Return(<undefined>)\n");
+  }
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-
-  Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+  int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
   __ add(sp, sp, Operand(sp_delta));
   __ Jump(lr);
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 9f240dd..4896373 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -52,12 +52,13 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
-    int locals_count = fun->scope()->num_stack_slots();
+    int locals_count = scope()->num_stack_slots();
 
     __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
     if (locals_count > 0) {
@@ -77,7 +78,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
@@ -87,9 +88,9 @@
       // passed to us.  It's saved in the stack and kept live in cp.
       __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       // Copy any necessary parameters into the context.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                    (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@
       }
     }
 
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -118,9 +119,10 @@
         __ mov(r3, r1);
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
-                             fun->num_parameters() * kPointerSize));
-      __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ add(r2, fp,
+             Operand(StandardFrameConstants::kCallerSPOffset + offset));
+      __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
       __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
 
       // Arguments to ArgumentsAccessStub:
@@ -133,7 +135,7 @@
       __ mov(r3, r0);
       Move(arguments->slot(), r0, r1, r2);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, r3, r1, r2);
     }
   }
@@ -155,7 +157,7 @@
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   if (FLAG_trace) {
@@ -164,7 +166,7 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
@@ -173,7 +175,7 @@
     // body.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   }
-  EmitReturnSequence(function_->end_position());
+  EmitReturnSequence(function()->end_position());
 }
 
 
@@ -196,7 +198,7 @@
 
     // Calculate the exact length of the return sequence and make sure that
     // the constant pool is not emitted inside of the return sequence.
-    int num_parameters = function_->scope()->num_parameters();
+    int num_parameters = scope()->num_parameters();
     int32_t sp_delta = (num_parameters + 1) * kPointerSize;
     int return_sequence_length = Assembler::kJSReturnSequenceLength;
     if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
@@ -512,7 +514,7 @@
       return MemOperand(fp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -572,7 +574,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -652,7 +654,7 @@
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
-  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -664,7 +666,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
@@ -814,9 +816,9 @@
         if (key->handle()->IsSymbol()) {
           VisitForValue(value, kAccumulator);
           __ mov(r2, Operand(key->handle()));
+          __ ldr(r1, MemOperand(sp));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
           __ Call(ic, RelocInfo::CODE_TARGET);
-          // StoreIC leaves the receiver on the stack.
           break;
         }
         // Fall through.
@@ -905,6 +907,92 @@
 }
 
 
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables rewritten to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
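+  // For example, "x = v" is VARIABLE, "o.x = v" is NAMED_PROPERTY, and
+  // "o[i] = v" is KEYED_PROPERTY.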
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForValue(prop->obj(), kAccumulator);
+        __ push(result_register());
+      } else {
+        VisitForValue(prop->obj(), kStack);
+      }
+      break;
+    case KEYED_PROPERTY:
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kStack);
+      break;
+  }
+
+  // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: Apply operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
@@ -943,21 +1031,17 @@
     ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in r0, variable name in
-    // r2, and the global object on the stack.
+    // r2, and the global object in r1.
     __ mov(r2, Operand(var->name()));
-    __ ldr(ip, CodeGenerator::GlobalObject());
-    __ push(ip);
+    __ ldr(r1, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
-    // Overwrite the global object on the stack with the result if needed.
-    DropAndApply(1, context, r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     __ push(result_register());  // Value.
     __ mov(r1, Operand(var->name()));
     __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
     __ CallRuntime(Runtime::kStoreContextSlot, 3);
-    Apply(context, r0);
 
   } else if (var->slot() != NULL) {
     Slot* slot = var->slot();
@@ -984,13 +1068,13 @@
         UNREACHABLE();
         break;
     }
-    Apply(context, result_register());
 
   } else {
     // Variables rewritten as properties are not treated as variables in
     // assignments.
     UNREACHABLE();
   }
+  Apply(context, result_register());
 }
 
 
@@ -1014,6 +1098,12 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  if (expr->ends_initialization_block()) {
+    __ ldr(r1, MemOperand(sp));
+  } else {
+    __ pop(r1);
+  }
+
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 
@@ -1024,9 +1114,10 @@
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
+    DropAndApply(1, context_, r0);
+  } else {
+    Apply(context_, r0);
   }
-
-  DropAndApply(1, context_, r0);
 }
 
 
@@ -1085,7 +1176,7 @@
 }
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
-                                       Handle<Object> ignored,
+                                       Handle<Object> name,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
@@ -1093,16 +1184,16 @@
   for (int i = 0; i < arg_count; i++) {
     VisitForValue(args->at(i), kStack);
   }
+  __ mov(r2, Operand(name));
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
-                                                         NOT_IN_LOOP);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
   __ Call(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  // Discard the function left on TOS.
-  DropAndApply(1, context_, r0);
+  Apply(context_, r0);
 }
 
 
@@ -1119,7 +1210,6 @@
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  // Discard the function left on TOS.
   DropAndApply(1, context_, r0);
 }
 
@@ -1133,11 +1223,9 @@
     // Call to the identifier 'eval'.
     UNREACHABLE();
   } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // Call to a global variable.
-    __ mov(r1, Operand(var->name()));
-    // Push global object as receiver for the call IC lookup.
+    // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
-    __ stm(db_w, sp, r1.bit() | r0.bit());
+    __ push(r0);
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
@@ -1149,8 +1237,6 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      __ mov(r0, Operand(key->handle()));
-      __ push(r0);
       VisitForValue(prop->obj(), kStack);
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
@@ -1236,10 +1322,9 @@
 
   if (expr->is_jsruntime()) {
     // Prepare for calling JS runtime function.
-    __ mov(r1, Operand(expr->name()));
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
-    __ stm(db_w, sp, r1.bit() | r0.bit());
+    __ push(r0);
   }
 
   // Push the arguments ("left-to-right").
@@ -1250,18 +1335,17 @@
 
   if (expr->is_jsruntime()) {
     // Call the JS runtime function.
+    __ mov(r2, Operand(expr->name()));
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                            NOT_IN_LOOP);
     __ Call(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    // Discard the function left on TOS.
-    DropAndApply(1, context_, r0);
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
-    Apply(context_, r0);
   }
+  Apply(context_, r0);
 }
 
 
@@ -1546,15 +1630,15 @@
       break;
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      __ pop(r1);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        __ Drop(1);  // Result is on the stack under the receiver.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(1, context_, r0);
+        Apply(context_, r0);
       }
       break;
     }
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index bae1e96..7ddb338 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -59,7 +59,7 @@
   // r3 - used as temporary and to hold the capacity of the property
   //      dictionary.
   //
-  // r2 - holds the name of the property and is unchanges.
+  // r2 - holds the name of the property and is unchanged.
 
   Label done;
 
@@ -190,7 +190,7 @@
 
   __ ldr(r0, MemOperand(sp, 0));
 
-  StubCompiler::GenerateLoadStringLength2(masm, r0, r1, r3, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -219,14 +219,13 @@
 
 void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
   Label number, non_number, non_string, boolean, probe, miss;
 
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-  // Get the name of the function from the stack; 1 ~ receiver.
-  __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
 
   // Probe the stub cache.
   Code::Flags flags =
@@ -301,9 +300,9 @@
 
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
-    __ ldr(r2, MemOperand(sp, argc * kPointerSize));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-    __ str(r2, MemOperand(sp, argc * kPointerSize));
+    __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    __ str(r0, MemOperand(sp, argc * kPointerSize));
   }
 
   // Invoke the function.
@@ -314,14 +313,13 @@
 
 void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
   Label miss, global_object, non_global_object;
 
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-  // Get the name of the function from the stack; 1 ~ receiver.
-  __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
   __ tst(r1, Operand(kSmiTagMask));
@@ -374,18 +372,17 @@
 
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
 
   // Get the receiver of the function from the stack.
-  __ ldr(r2, MemOperand(sp, argc * kPointerSize));
-  // Get the name of the function to call from the stack.
-  __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+  __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
   __ EnterInternalFrame();
 
   // Push the receiver and the name of the function.
-  __ stm(db_w, sp, r1.bit() | r2.bit());
+  __ stm(db_w, sp, r2.bit() | r3.bit());
 
   // Call the entry.
   __ mov(r0, Operand(2));
@@ -438,7 +435,7 @@
   StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
 
   // Cache miss: Jump to runtime.
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }
 
 
@@ -482,16 +479,11 @@
 
   // Cache miss: Restore receiver from stack and jump to runtime.
   __ bind(&miss);
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -502,7 +494,7 @@
   __ stm(db_w, sp, r2.bit() | r3.bit());
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
 }
 
 
@@ -530,11 +522,6 @@
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- sp[0]  : key
@@ -544,7 +531,21 @@
   __ ldm(ia, sp, r2.bit() | r3.bit());
   __ stm(db_w, sp, r2.bit() | r3.bit());
 
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  // -----------------------------------
+
+  __ ldm(ia, sp, r2.bit() | r3.bit());
+  __ stm(db_w, sp, r2.bit() | r3.bit());
+
+  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
 }
 
 
@@ -558,17 +559,11 @@
 
   // Get the key and receiver object from the stack.
   __ ldm(ia, sp, r0.bit() | r1.bit());
-  // Check that the key is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &slow);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  // Check that the object isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);
 
+  // Check that the object isn't a smi.
+  __ BranchOnSmi(r1, &slow);
   // Get the map of the receiver.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
   // Check bit field.
   __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
   __ tst(r3, Operand(kSlowCaseBitFieldMask));
@@ -582,6 +577,10 @@
   __ cmp(r2, Operand(JS_OBJECT_TYPE));
   __ b(lt, &slow);
 
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
   // Get the elements array of the object.
   __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
@@ -597,10 +596,7 @@
   // Slow case: Push extra copies of the arguments (2).
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
-  __ ldm(ia, sp, r0.bit() | r1.bit());
-  __ stm(db_w, sp, r0.bit() | r1.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+  GenerateRuntimeGetProperty(masm);
 
   // Fast case: Do the load.
   __ bind(&fast);
@@ -634,8 +630,47 @@
 }
 
 
-void KeyedStoreIC::Generate(MacroAssembler* masm,
-                            const ExternalReference& f) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  // -----------------------------------
+  Label slow;
+
+  // Get the key and receiver object from the stack.
+  __ ldm(ia, sp, r0.bit() | r1.bit());
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(r1, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+
+  // Get the map of the receiver.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
+  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
+  __ b(ne, &slow);
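+  // Masking with kSlowCaseBitFieldMask and comparing against the interceptor
+  // bit alone ensures the interceptor is present and no other slow-case bit
+  // (such as access checks) is set.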
+
+  // Everything is fine, call runtime.
+  __ push(r1);  // receiver
+  __ push(r0);  // key
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(
+        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- lr     : return address
@@ -646,7 +681,21 @@
   __ ldm(ia, sp, r2.bit() | r3.bit());
   __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
 
-  __ TailCallRuntime(f, 3, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  // -----------------------------------
+  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
 }
 
 
@@ -701,12 +750,9 @@
   __ b(lo, &fast);
 
 
-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case:
   __ bind(&slow);
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  GenerateRuntimeSetProperty(masm);
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
@@ -777,33 +823,15 @@
 }
 
 
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
-  // ----------- S t a t e -------------
-
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
-
-
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
 
   // Get the receiver from the stack and probe the stub cache.
-  __ ldr(r1, MemOperand(sp));
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC);
@@ -814,39 +842,69 @@
 }
 
 
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- [sp]  : receiver
-  // -----------------------------------
-
-  __ ldr(r3, MemOperand(sp));  // copy receiver
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
 
-  __ ldr(r3, MemOperand(sp));  // copy receiver
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+  __ push(r1);
+  __ stm(db_w, sp, r2.bit() | r0.bit());
 
   // Perform tail call to the entry.
   __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
 }
 
 
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays which means
+  // anything with elements of FixedArray type.), but currently is restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
+
+  Label miss;
+
+  Register receiver = r1;
+  Register value = r0;
+  Register scratch = r3;
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that elements are FixedArray.
+  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that value is a smi.
+  __ BranchOnNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ push(receiver);
+  __ push(value);
+
+  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
+}
+
+
 #undef __
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b39404e..b249d69 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -37,7 +37,6 @@
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
       code_object_(Heap::undefined_value()) {
@@ -196,7 +195,7 @@
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
 }
 
 
@@ -331,14 +330,10 @@
 
   // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
-  mov(fp, Operand(sp));  // setup new frame pointer
+  mov(fp, Operand(sp));  // Set up the new frame pointer.
 
-  if (mode == ExitFrame::MODE_DEBUG) {
-    mov(ip, Operand(Smi::FromInt(0)));
-  } else {
-    mov(ip, Operand(CodeObject()));
-  }
-  push(ip);
+  mov(ip, Operand(CodeObject()));
+  push(ip);  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -550,6 +545,21 @@
 }
 
 
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+
+  // Get the function and setup the context.
+  mov(r1, Operand(Handle<JSFunction>(function)));
+  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::SaveRegistersToMemory(RegList regs) {
   ASSERT((regs & ~kJSCallerSaved) == 0);
@@ -608,6 +618,15 @@
     }
   }
 }
+
+
+void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
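+  // Pass the argument count (zero) in r0 and the runtime entry in r1, then
+  // call the C entry stub with a DEBUG_BREAK relocation.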
+  mov(r0, Operand(0));
+  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+  CEntryStub ces(1);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
 #endif
 
 
@@ -940,6 +959,113 @@
 }
 
 
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
+  add(scratch1, scratch1,
+      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kStringMapRootIndex);
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kCharSize == 1);
+  add(scratch1, length,
+      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
 void MacroAssembler::CompareObjectType(Register function,
                                        Register map,
                                        Register type_reg,
@@ -957,6 +1083,21 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    BranchOnSmi(obj, fail);
+  }
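+  // Compare the object's map against the expected map; ip is used as an
+  // extra scratch register to hold the map handle.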
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
+  b(ne, fail);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
@@ -1010,10 +1151,17 @@
 }
 
 
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
-  if (argc > 1)
+  if (argc > 1) {
     add(sp, sp, Operand((argc - 1) * kPointerSize));
+  }
   Ret();
 }
 
@@ -1037,6 +1185,18 @@
 }
 
 
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
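+  // On ARMv7, extract the bits just past the smi tag with a single ubfx;
+  // otherwise untag with an arithmetic shift and mask off the low bits.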
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+  } else {
+    mov(dst, Operand(src, ASR, kSmiTagSize));
+    and_(dst, dst, Operand((1 << num_least_bits) - 1));
+  }
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
   // All parameters are on the stack.  r0 has the return value after call.
 
@@ -1064,6 +1224,16 @@
 }
 
 
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  mov(r0, Operand(num_arguments));
+  mov(r1, Operand(ext));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                      int num_arguments,
                                      int result_size) {
@@ -1087,58 +1257,28 @@
 }
 
 
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
-  // Contract with compiled functions is that the function is passed in r1.
-  int builtins_offset =
-      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
-  ldr(r1, FieldMemOperand(r1, builtins_offset));
-
-  return Builtins::GetCode(id, resolved);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeJSFlags flags) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
+  GetBuiltinEntry(r2, id);
   if (flags == CALL_JS) {
-    Call(code, RelocInfo::CODE_TARGET);
+    Call(r2);
   } else {
     ASSERT(flags == JUMP_JS);
-    Jump(code, RelocInfo::CODE_TARGET);
-  }
-
-  if (!resolved) {
-    const char* name = Builtins::GetName(id);
-    int argc = Builtins::GetArgumentsCount(id);
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
-    unresolved_.Add(entry);
+    Jump(r2);
   }
 }
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
-  mov(target, Operand(code));
-  if (!resolved) {
-    const char* name = Builtins::GetName(id);
-    int argc = Builtins::GetArgumentsCount(id);
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
-    unresolved_.Add(entry);
-  }
-
+  // Load the JavaScript builtin function from the builtins object.
+  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  ldr(r1, FieldMemOperand(r1, builtins_offset));
+  // Load the code entry point from the function into the target register.
+  ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
   add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
@@ -1238,6 +1378,26 @@
 }
 
 
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  ASSERT_EQ(0, kSmiTag);
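+  // The second tst executes only if reg1's tag bit was clear (eq), so a
+  // final ne means at least one of the registers is not a smi.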
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), eq);
+  b(ne, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  ASSERT_EQ(0, kSmiTag);
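+  // The second tst executes only if reg1 is not a smi (ne), so a final eq
+  // means at least one of the registers is a smi.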
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), ne);
+  b(eq, on_either_smi);
+}
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index efc5bfa..98cea16 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -33,10 +33,18 @@
 namespace v8 {
 namespace internal {
 
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
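+// The offset is adjusted for the tag that heap object pointers carry.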
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
 
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
-
+const Register roots = { 10 };  // Roots array pointer.
 
 enum InvokeJSFlags {
   CALL_JS,
@@ -49,14 +57,7 @@
  public:
   MacroAssembler(void* buffer, int size);
 
-  // ---------------------------------------------------------------------------
-  // Low-level helpers for compiler
-
-  // Jump, Call, and Ret pseudo instructions implementing inter-working
- private:
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- public:
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
   void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -134,6 +135,10 @@
                       const ParameterCount& actual,
                       InvokeFlag flag);
 
+  void InvokeFunction(JSFunction* function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -145,6 +150,7 @@
   void CopyRegistersFromStackToMemory(Register base,
                                       Register scratch,
                                       RegList regs);
+  void DebugBreak();
 #endif
 
   // ---------------------------------------------------------------------------
@@ -209,6 +215,31 @@
   // allocation is undone.
   void UndoAllocationInNewSpace(Register object, Register scratch);
 
+
+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           Register length,
+                           Register scratch1,
+                           Register scratch2,
+                           Register scratch3,
+                           Label* gc_required);
+  void AllocateTwoByteConsString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+  void AllocateAsciiConsString(Register result,
+                               Register length,
+                               Register scratch1,
+                               Register scratch2,
+                               Label* gc_required);
+
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -243,6 +274,29 @@
                            Register type_reg,
                            InstanceType type);
 
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // the label if not. Skip the smi check if it is not required (i.e. the
+  // object is known to be a heap object).
+  void CheckMap(Register obj,
+                Register scratch,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
+  // Load the instance type of an object and check whether it is a string.
+  // Loads the type into the second argument register.
+  // Returns the condition that holds if the object is a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type) {
+    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    tst(type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
+
+
   inline void BranchOnSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
     b(eq, smi_label);
@@ -257,6 +311,9 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+  // Get the given number of least significant bits from a smi.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+
   // Uses VFP instructions to Convert a Smi to a double.
   void IntegerToDoubleConversionWithVFP3(Register inReg,
                                          Register outHighReg,
@@ -269,6 +326,9 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub, Condition cond = al);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -279,6 +339,10 @@
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
 
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext,
+                             int num_arguments);
+
   // Tail call of a runtime routine (jump).
   // Like JumpToRuntime, but also takes care of passing the number
   // of parameters.
@@ -297,13 +361,6 @@
   // setup the function in r1.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
-  struct Unresolved {
-    int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
-    const char* name;
-  };
-  List<Unresolved>* unresolved() { return &unresolved_; }
-
   Handle<Object> CodeObject() { return code_object_; }
 
 
@@ -338,6 +395,14 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
   // ---------------------------------------------------------------------------
+  // Smi utilities
+
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // ---------------------------------------------------------------------------
   // String utilities
 
   // Checks if both objects are sequential ASCII strings and jumps to label
@@ -357,11 +422,8 @@
                                            Label* not_flat_ascii_strings);
 
  private:
-  List<Unresolved> unresolved_;
-  bool generating_stub_;
-  bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
-                                // object on installation.
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -371,21 +433,14 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Prepares for a call or jump to a builtin by doing two things:
-  // 1. Emits code that fetches the builtin's function object from the context
-  //    at runtime, and puts it in the register rdi.
-  // 2. Fetches the builtin's code object, and returns it in a handle, at
-  //    compile time, so that later code can emit instructions to jump or call
-  //    the builtin directly.  If the code object has not yet been created, it
-  //    returns the builtin code object for IllegalFunction, and sets the
-  //    output parameter "resolved" to false.  Code that uses the return value
-  //    should then add the address and the builtin name to the list of fixups
-  //    called unresolved_, which is fixed up by the bootstrapper.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
+
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 };
 
 
@@ -421,12 +476,6 @@
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f543151..cee5aea 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1741,7 +1741,7 @@
 
 
 void Simulator::DecodeType3(Instr* instr) {
-  ASSERT(instr->Bit(4) == 0);
+  ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
   int rd = instr->RdField();
   int rn = instr->RnField();
   int32_t rn_val = get_register(rn);
@@ -1768,10 +1768,26 @@
       break;
     }
     case 3: {
-      // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
-      addr = rn_val + shifter_operand;
-      if (instr->HasW()) {
-        set_register(rn, addr);
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
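+        // This is a ubfx instruction: extract the bit field starting at
+        // lsbit, of width (widthminus1 + 1), from rm and zero-extend it
+        // into rd.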
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
+        if (msbit <= 31) {
+          uint32_t rm_val =
+              static_cast<uint32_t>(get_register(instr->RmField()));
+          uint32_t extr_val = rm_val << (31 - msbit);
+          extr_val = extr_val >> (31 - widthminus1);
+          set_register(instr->RdField(), extr_val);
+        } else {
+          UNREACHABLE();
+        }
+        return;
+      } else {
+        // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+        addr = rn_val + shifter_operand;
+        if (instr->HasW()) {
+          set_register(rn, addr);
+        }
       }
       break;
     }
@@ -1785,7 +1801,8 @@
       uint8_t byte = ReadB(addr);
       set_register(rd, byte);
     } else {
-      UNIMPLEMENTED();
+      uint8_t byte = get_register(rd);
+      WriteB(addr, byte);
     }
   } else {
     if (instr->HasL()) {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index d19a683..da73942 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -189,8 +189,9 @@
 }
 
 
-// Generate code to check if an object is a string.  If the object is
-// a string, the map's instance type is left in the scratch1 register.
+// Generate code to check if an object is a string.  If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
 static void GenerateStringCheck(MacroAssembler* masm,
                                 Register receiver,
                                 Register scratch1,
@@ -215,18 +216,16 @@
 // If the receiver object is not a string or a wrapped string object the
 // execution continues at the miss label. The register containing the
 // receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
-                                             Register receiver,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* miss) {
-  Label check_string, check_wrapper;
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss) {
+  Label check_wrapper;
 
-  __ bind(&check_string);
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2,
-                      miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
@@ -238,9 +237,12 @@
   __ cmp(scratch1, Operand(JS_VALUE_TYPE));
   __ b(ne, miss);
 
-  // Unwrap the value in place and check if the wrapped value is a string.
-  __ ldr(receiver, FieldMemOperand(receiver, JSValue::kValueOffset));
-  __ b(&check_string);
+  // Unwrap the value and check if the wrapped value is a string.
+  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ Ret();
 }
 
 
@@ -256,10 +258,10 @@
 
 
 // Generate StoreField code, value is passed in r0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered.  Upon branch to miss_label, the receiver and name
+// registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
                                       JSObject* object,
                                       int index,
                                       Map* transition,
@@ -292,11 +294,12 @@
   if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
     __ mov(r2, Operand(Handle<Map>(transition)));
-    // Please note, if we implement keyed store for arm we need
-    // to call the Builtins::KeyedStoreIC_ExtendStorage.
-    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
-    __ Jump(ic, RelocInfo::CODE_TARGET);
+    __ stm(db_w, sp, r2.bit() | r0.bit());
+    __ TailCallRuntime(
+           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
+           3, 1);
     return;
   }
 
@@ -373,7 +376,7 @@
 
   // Check that the function really is a function.
   __ BranchOnSmi(r1, miss);
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -388,68 +391,6 @@
 }
 
 
-static void GenerateCallConstFunction(MacroAssembler* masm,
-                                      JSFunction* function,
-                                      const ParameterCount& arguments) {
-  ASSERT(function->is_compiled());
-
-  // Get the function and setup the context.
-  __ mov(r1, Operand(Handle<JSFunction>(function)));
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-  // Jump to the cached code (tail call).
-  Handle<Code> code(function->code());
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments,
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
-}
-
-
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
-                                   StubCompiler* stub_compiler,
-                                   MacroAssembler* masm,
-                                   JSObject* object,
-                                   JSObject* holder,
-                                   String* name,
-                                   LookupResult* lookup,
-                                   Register receiver,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* miss) {
-  ASSERT(holder->HasNamedInterceptor());
-  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, miss);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      stub_compiler->CheckPrototypes(object, receiver, holder,
-                                     scratch1, scratch2, name, miss);
-
-  if (lookup->IsValid() && lookup->IsCacheable()) {
-    compiler->CompileCacheable(masm,
-                               stub_compiler,
-                               receiver,
-                               reg,
-                               scratch1,
-                               scratch2,
-                               holder,
-                               lookup,
-                               name,
-                               miss);
-  } else {
-    compiler->CompileRegular(masm,
-                             receiver,
-                             reg,
-                             scratch2,
-                             holder,
-                             miss);
-  }
-}
-
-
 static void PushInterceptorArguments(MacroAssembler* masm,
                                      Register receiver,
                                      Register holder,
@@ -500,7 +441,7 @@
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
-    AccessorInfo* callback = 0;
+    AccessorInfo* callback = NULL;
     bool optimize = false;
     // So far the most popular follow ups for interceptor loads are FIELD
     // and CALLBACKS, so inline only them, other cases may be added
@@ -523,9 +464,7 @@
     // Note: starting a frame here makes GC aware of pointers pushed below.
     __ EnterInternalFrame();
 
-    if (lookup->type() == CALLBACKS) {
-      __ push(receiver);
-    }
+    __ push(receiver);
     __ push(holder);
     __ push(name_);
 
@@ -546,10 +485,7 @@
     __ bind(&interceptor_failed);
     __ pop(name_);
     __ pop(holder);
-
-    if (lookup->type() == CALLBACKS) {
-      __ pop(receiver);
-    }
+    __ pop(receiver);
 
     __ LeaveInternalFrame();
 
@@ -621,108 +557,48 @@
 };
 
 
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
-  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
-      : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
-  void CompileCacheable(MacroAssembler* masm,
-                        StubCompiler* stub_compiler,
-                        Register receiver,
-                        Register holder,
-                        Register scratch1,
-                        Register scratch2,
-                        JSObject* holder_obj,
-                        LookupResult* lookup,
-                        String* name,
-                        Label* miss_label) {
-    JSFunction* function = 0;
-    bool optimize = false;
-    // So far the most popular case for failed interceptor is
-    // CONSTANT_FUNCTION sitting below.
-    if (lookup->type() == CONSTANT_FUNCTION) {
-      function = lookup->GetConstantFunction();
-      // JSArray holder is a special case for call constant function
-      // (see the corresponding code).
-      if (function->is_compiled() && !holder_obj->IsJSArray()) {
-        optimize = true;
-      }
-    }
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, miss);
 
-    if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
-      return;
-    }
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
 
-    // Constant functions cannot sit on global object.
-    ASSERT(!lookup->holder()->IsGlobalObject());
-
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    ASSERT(!r0.is(name_));
-    ASSERT(!r0.is(scratch1));
-    __ pop(name_);  // Restore the name.
-    __ pop(scratch1);  // Restore the holder.
-    __ LeaveInternalFrame();
-
-    // Compare with no_interceptor_result_sentinel.
-    __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch2);
-    Label invoke;
-    __ b(ne, &invoke);
-
-    stub_compiler->CheckPrototypes(holder_obj, scratch1,
-                                   lookup->holder(), scratch1,
-                                   scratch2,
-                                   name,
-                                   miss_label);
-    GenerateCallConstFunction(masm, function, arguments_);
-
-    __ bind(&invoke);
-  }
-
-  void CompileRegular(MacroAssembler* masm,
-                      Register receiver,
-                      Register holder,
-                      Register scratch,
-                      JSObject* holder_obj,
-                      Label* miss_label) {
-    __ EnterInternalFrame();
-    // Save the name_ register across the call.
-    __ push(name_);
-
-    PushInterceptorArguments(masm,
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
                              receiver,
+                             reg,
+                             scratch2,
                              holder,
-                             name_,
-                             holder_obj);
-
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
-    __ mov(r0, Operand(5));
-    __ mov(r1, Operand(ref));
-
-    CEntryStub stub(1);
-    __ CallStub(&stub);
-
-    // Restore the name_ register.
-    __ pop(name_);
-    __ LeaveInternalFrame();
+                             miss);
   }
-
- private:
-  const ParameterCount& arguments_;
-  int argc_;
-  Register name_;
-};
+}
 
 
 #undef __
@@ -735,7 +611,11 @@
                                        Register holder_reg,
                                        Register scratch,
                                        String* name,
+                                       int save_at_depth,
                                        Label* miss) {
+  // TODO(602): support object saving.
+  ASSERT(save_at_depth == kInvalidProtoDepth);
+
   // Check that the maps haven't changed.
   Register result =
       masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
@@ -762,7 +642,7 @@
     object = JSObject::cast(object->GetPrototype());
   }
 
-  // Return the register containin the holder.
+  // Return the register containing the holder.
   return result;
 }
 
@@ -901,12 +781,13 @@
 }
 
 
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
                                            JSObject* holder,
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
   Label miss;
 
@@ -919,8 +800,7 @@
   __ b(eq, &miss);
 
   // Do the right check and compute the holder register.
-  Register reg =
-      CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+  Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
   GenerateCallFunction(masm(), object, arguments(), &miss);
@@ -941,7 +821,8 @@
                                               String* name,
                                               CheckType check) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
   Label miss;
 
@@ -962,7 +843,7 @@
   switch (check) {
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -978,13 +859,13 @@
         __ jmp(&miss);
       } else {
         // Check that the object is a two-byte string or a symbol.
-        __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+        __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(hs, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r2);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                            r0);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, name, &miss);
       }
       break;
@@ -998,14 +879,14 @@
         // Check that the object is a smi or a heap number.
         __ tst(r1, Operand(kSmiTagMask));
         __ b(eq, &fast);
-        __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+        __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
         __ b(ne, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::NUMBER_FUNCTION_INDEX,
-                                            r2);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                            r0);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, name, &miss);
       }
       break;
@@ -1028,22 +909,22 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::BOOLEAN_FUNCTION_INDEX,
-                                            r2);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                            r0);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, name, &miss);
       }
       break;
     }
 
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
       // Make sure object->HasFastElements().
       // Get the elements array of the object.
       __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
       // Check that the object is in fast mode (not dictionary).
-      __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+      __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
       __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-      __ cmp(r2, ip);
+      __ cmp(r0, ip);
       __ b(ne, &miss);
       break;
 
@@ -1051,7 +932,7 @@
       UNREACHABLE();
   }
 
-  GenerateCallConstFunction(masm(), function, arguments());
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1067,14 +948,22 @@
 }
 
 
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
   Label miss;
 
+  const Register receiver = r0;
+  const Register holder_reg = r1;
+  const Register name_reg = r2;
+  const Register scratch = r3;
+
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
@@ -1083,24 +972,79 @@
 
   // Get the receiver from the stack into r0.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-  // Load the name from the stack into r1.
-  __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(arguments(), r1);
-  CompileLoadInterceptor(&compiler,
-                         this,
-                         masm(),
-                         JSObject::cast(object),
-                         holder,
-                         name,
-                         &lookup,
-                         r0,
-                         r2,
-                         r3,
-                         &miss);
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, &miss);
 
+  // Check that the maps haven't changed.
+  Register reg = CheckPrototypes(object, receiver, holder, holder_reg,
+                                 scratch, name, &miss);
+  if (!reg.is(holder_reg)) {
+    __ mov(holder_reg, reg);
+  }
+
+  // If we call a constant function when the interceptor returns
+  // the no-result sentinel, generate code that optimizes this case.
+  if (lookup.IsProperty() &&
+      lookup.IsCacheable() &&
+      lookup.type() == CONSTANT_FUNCTION &&
+      lookup.GetConstantFunction()->is_compiled() &&
+      !holder->IsJSArray()) {
+    // Constant functions cannot sit on global object.
+    ASSERT(!lookup.holder()->IsGlobalObject());
+
+    // Call the interceptor.
+    __ EnterInternalFrame();
+    __ push(holder_reg);
+    __ push(name_reg);
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           holder);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    __ LeaveInternalFrame();
+    // r0 no longer contains the receiver.
+
+    // If the interceptor returns the no-result sentinel, call the constant
+    // function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch);
+    Label invoke;
+    __ b(ne, &invoke);
+    // Check the prototypes between the interceptor's holder and the
+    // constant function's holder.
+    CheckPrototypes(holder, holder_reg,
+                    lookup.holder(), r0,
+                    scratch,
+                    name,
+                    &miss);
+
+    __ InvokeFunction(lookup.GetConstantFunction(),
+                      arguments(),
+                      JUMP_FUNCTION);
+
+    __ bind(&invoke);
+
+  } else {
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    __ push(name_reg);
+
+    PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder);
+
+    __ CallExternalReference(
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+        5);
+
+    __ pop(name_reg);
+    __ LeaveInternalFrame();
+  }
+
+  // Move returned value, the function to call, to r1.
+  __ mov(r1, r0);
   // Restore receiver.
-  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
 
   GenerateCallFunction(masm(), object, arguments(), &miss);
 
@@ -1120,7 +1064,8 @@
                                             JSFunction* function,
                                             String* name) {
   // ----------- S t a t e -------------
-  //  -- lr: return address
+  //  -- r2    : name
+  //  -- lr    : return address
   // -----------------------------------
   Label miss;
 
@@ -1139,7 +1084,7 @@
   }
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, r0, holder, r3, r2, name, &miss);
+  CheckPrototypes(object, r0, holder, r3, r1, name, &miss);
 
   // Get the value from the cell.
   __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1159,8 +1104,8 @@
 
     // Check the shared function info. Make sure it hasn't changed.
     __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ cmp(r2, r3);
+    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ cmp(r4, r3);
     __ b(ne, &miss);
   } else {
     __ cmp(r1, Operand(Handle<JSFunction>(function)));
@@ -1178,7 +1123,7 @@
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
-  __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
+  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1202,25 +1147,19 @@
                                              String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  // Get the receiver from the stack.
-  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
-  // name register might be clobbered.
   GenerateStoreField(masm(),
-                     Builtins::StoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
-                     r3, r2, r1,
+                     r1, r2, r3,
                      &miss);
   __ bind(&miss);
-  __ mov(r2, Operand(Handle<String>(name)));  // restore name
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
@@ -1234,39 +1173,33 @@
                                                 String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  // Get the object from the stack.
-  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
   // Check that the object isn't a smi.
-  __ tst(r3, Operand(kSmiTagMask));
+  __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &miss);
 
   // Check that the map of the object hasn't changed.
-  __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Handle<Map>(object->map())));
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r3, Operand(Handle<Map>(object->map())));
   __ b(ne, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(r3, r1, &miss);
+    __ CheckAccessGlobalProxy(r1, r3, &miss);
   }
 
   // Stub never generated for non-global objects that require access
   // checks.
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
-  __ ldr(ip, MemOperand(sp));  // receiver
-  __ push(ip);
+  __ push(r1);  // receiver
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
-  __ push(ip);
-  __ push(r2);  // name
-  __ push(r0);  // value
+  __ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
 
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
@@ -1287,37 +1220,33 @@
                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  // Get the object from the stack.
-  __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-
   // Check that the object isn't a smi.
-  __ tst(r3, Operand(kSmiTagMask));
+  __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &miss);
 
   // Check that the map of the object hasn't changed.
-  __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Handle<Map>(receiver->map())));
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r3, Operand(Handle<Map>(receiver->map())));
   __ b(ne, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(r3, r1, &miss);
+    __ CheckAccessGlobalProxy(r1, r3, &miss);
   }
 
-  // Stub never generated for non-global objects that require access
+  // Stub is never generated for non-global objects that require access
   // checks.
   ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
 
-  __ ldr(ip, MemOperand(sp));  // receiver
-  __ push(ip);
-  __ push(r2);  // name
-  __ push(r0);  // value
+  __ push(r1);  // receiver.
+  __ push(r2);  // name.
+  __ push(r0);  // value.
 
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
@@ -1339,14 +1268,13 @@
                                               String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
   // Check that the map of the global has not changed.
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r3, Operand(Handle<Map>(object->map())));
   __ b(ne, &miss);
@@ -1355,12 +1283,12 @@
   __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
 
-  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
   __ Ret();
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
@@ -1672,7 +1600,7 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadStringLength2(masm(), r0, r1, r3, &miss);
+  GenerateLoadStringLength(masm(), r0, r1, r3, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
 
@@ -1717,7 +1645,6 @@
   __ ldr(r3, MemOperand(sp));
   // r1 is used as scratch register, r3 and r2 might be clobbered.
   GenerateStoreField(masm(),
-                     Builtins::StoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 7a8ac72..0f7c597 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -47,7 +47,7 @@
     : elements_(parameter_count() + local_count() + kPreallocatedElements),
       stack_pointer_(parameter_count()) {  // 0-based index of TOS.
   for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement());
+    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
   }
   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
     register_locations_[i] = kIllegalIndex;
@@ -233,6 +233,14 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ DebugBreak();
+}
+#endif
+
+
 void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                  InvokeJSFlags flags,
                                  int arg_count) {
@@ -305,7 +313,7 @@
 
 void VirtualFrame::EmitPush(Register reg) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
   stack_pointer_++;
   __ push(reg);
 }
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 9a2f7d3..a45cfc6 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -68,7 +68,8 @@
   MacroAssembler* masm() { return cgen()->masm(); }
 
   // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index);
+  FrameElement CopyElementAt(int index,
+                             NumberInfo::Type info = NumberInfo::kUnknown);
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -297,6 +298,10 @@
   void CallRuntime(Runtime::Function* f, int arg_count);
   void CallRuntime(Runtime::FunctionId id, int arg_count);
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void DebugBreak();
+#endif
+
   // Invoke builtin given the number of arguments it expects on (and
   // removes from) the stack.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -339,7 +344,7 @@
   void EmitPushMultiple(int count, int src_regs);
 
   // Push an element on the virtual frame.
-  void Push(Register reg);
+  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
   void Push(Handle<Object> value);
   void Push(Smi* value) { Push(Handle<Object>(value)); }
 
diff --git a/src/array.js b/src/array.js
index c3ab179..c28a662 100644
--- a/src/array.js
+++ b/src/array.js
@@ -566,10 +566,11 @@
 function ArraySplice(start, delete_count) {
   var num_arguments = %_ArgumentsLength();
 
-  // SpiderMonkey and KJS return undefined in the case where no
+  // SpiderMonkey and JSC return undefined in the case where no
   // arguments are given instead of using the implicit undefined
   // arguments.  This does not follow ECMA-262, but we do the same for
   // compatibility.
+  // TraceMonkey follows ECMA-262 though.
   if (num_arguments == 0) return;
 
   var len = TO_UINT32(this.length);
@@ -582,7 +583,7 @@
     if (start_i > len) start_i = len;
   }
 
-  // SpiderMonkey and KJS treat the case where no delete count is
+  // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
   // given differently from when an undefined delete count is given.
   // This does not follow ECMA-262, but we do the same for
   // compatibility.
diff --git a/src/assembler.cc b/src/assembler.cc
index dbf2742..96d516f 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -430,6 +430,11 @@
       return "code target (js construct call)";
     case RelocInfo::CODE_TARGET_CONTEXT:
       return "code target (context)";
+    case RelocInfo::DEBUG_BREAK:
+#ifndef ENABLE_DEBUGGER_SUPPORT
+      UNREACHABLE();
+#endif
+      return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
     case RelocInfo::RUNTIME_ENTRY:
@@ -485,6 +490,11 @@
     case EMBEDDED_OBJECT:
       Object::VerifyPointer(target_object());
       break;
+    case DEBUG_BREAK:
+#ifndef ENABLE_DEBUGGER_SUPPORT
+      UNREACHABLE();
+      break;
+#endif
     case CONSTRUCT_CALL:
     case CODE_TARGET_CONTEXT:
     case CODE_TARGET: {
diff --git a/src/assembler.h b/src/assembler.h
index ec47d57..f401306 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -119,6 +119,7 @@
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
     CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
     CODE_TARGET_CONTEXT,  // code target used for contextual loads.
+    DEBUG_BREAK,
     CODE_TARGET,         // code target which is not any of the above.
     EMBEDDED_OBJECT,
     EMBEDDED_STRING,
@@ -506,8 +507,10 @@
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
 
-static inline bool is_int24(int x)  { return is_intn(x, 24); }
 static inline bool is_int8(int x)  { return is_intn(x, 8); }
+static inline bool is_int16(int x)  { return is_intn(x, 16); }
+static inline bool is_int18(int x)  { return is_intn(x, 18); }
+static inline bool is_int24(int x)  { return is_intn(x, 24); }
 
 static inline bool is_uintn(int x, int n) {
   return (x & -(1 << n)) == 0;
@@ -519,9 +522,20 @@
 static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
 static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
 static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+static inline bool is_uint10(int x)  { return is_uintn(x, 10); }
 static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
 static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
 static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
+static inline bool is_uint26(int x)  { return is_uintn(x, 26); }
+static inline bool is_uint28(int x)  { return is_uintn(x, 28); }
+
+static inline int NumberOfBitsSet(uint32_t x) {
+  unsigned int num_bits_set;
+  for (num_bits_set = 0; x; x >>= 1) {
+    num_bits_set += x & 1;
+  }
+  return num_bits_set;
+}
 
 } }  // namespace v8::internal
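
The new NumberOfBitsSet helper counts set bits by testing and shifting one bit per iteration, so it loops once per bit position up to the highest set bit. For a quick cross-check, Kernighan's variant clears the lowest set bit each round and therefore loops once per set bit; a self-contained sketch, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    // Kernighan's popcount: x &= x - 1 clears the lowest set bit, so
    // the loop body runs exactly once per set bit.
    static int PopCountKernighan(uint32_t x) {
      int n = 0;
      for (; x != 0; x &= x - 1) n++;
      return n;
    }

    int main() {
      assert(PopCountKernighan(0) == 0);
      assert(PopCountKernighan(0xFF) == 8);
      assert(PopCountKernighan(0x80000001u) == 2);
      return 0;
    }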
 
diff --git a/src/ast.h b/src/ast.h
index 48d0bfa..8e717a6 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -102,6 +102,7 @@
 // Forward declarations
 class TargetCollector;
 class MaterializedLiteral;
+class DefinitionInfo;
 
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -182,7 +183,7 @@
 
   static const int kNoLabel = -1;
 
-  Expression() : num_(kNoLabel) {}
+  Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
 
   virtual Expression* AsExpression()  { return this; }
 
@@ -193,6 +194,11 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() { return false; }
 
+  // True if the expression does not have (evaluated) subexpressions.
+  // Function literals are leaves because their subexpressions are not
+  // evaluated.
+  virtual bool IsLeaf() { return false; }
+
   // Mark the expression as being compiled as an expression
   // statement. This is used to transform postfix increments to
   // (faster) prefix increments.
@@ -206,9 +212,20 @@
   // AST node numbering ordered by evaluation order.
   void set_num(int n) { num_ = n; }
 
+  // Data flow information.
+  DefinitionInfo* var_def() { return def_; }
+  void set_var_def(DefinitionInfo* def) { def_ = def; }
+
+  ZoneList<DefinitionInfo*>* defined_vars() { return defined_vars_; }
+  void set_defined_vars(ZoneList<DefinitionInfo*>* defined_vars) {
+    defined_vars_ = defined_vars;
+  }
+
  private:
   StaticType type_;
   int num_;
+  DefinitionInfo* def_;
+  ZoneList<DefinitionInfo*>* defined_vars_;
 };
 
 
@@ -720,6 +737,8 @@
     return false;
   }
 
+  virtual bool IsLeaf() { return true; }
+
   // Identity testers.
   bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
   bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -802,6 +821,8 @@
   virtual ObjectLiteral* AsObjectLiteral() { return this; }
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsLeaf() { return properties()->is_empty(); }
+
   Handle<FixedArray> constant_properties() const {
     return constant_properties_;
   }
@@ -825,6 +846,8 @@
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsLeaf() { return true; }
+
   Handle<String> pattern() const { return pattern_; }
   Handle<String> flags() const { return flags_; }
 
@@ -849,6 +872,8 @@
   virtual void Accept(AstVisitor* v);
   virtual ArrayLiteral* AsArrayLiteral() { return this; }
 
+  virtual bool IsLeaf() { return values()->is_empty(); }
+
   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   ZoneList<Expression*>* values() const { return values_; }
 
@@ -896,6 +921,11 @@
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
 
+  virtual bool IsLeaf() {
+    ASSERT(var_ != NULL);  // Variable must be resolved.
+    return var()->is_global() || var()->rewrite()->IsLeaf();
+  }
+
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
@@ -981,6 +1011,8 @@
   // Type testing & conversion
   virtual Slot* AsSlot() { return this; }
 
+  virtual bool IsLeaf() { return true; }
+
   // Accessors
   Variable* var() const { return var_; }
   Type type() const { return type_; }
@@ -1337,6 +1369,8 @@
   // Type testing & conversion
   virtual FunctionLiteral* AsFunctionLiteral()  { return this; }
 
+  virtual bool IsLeaf() { return true; }
+
   Handle<String> name() const  { return name_; }
   Scope* scope() const  { return scope_; }
   ZoneList<Statement*>* body() const  { return body_; }
@@ -1403,6 +1437,8 @@
 
   Handle<JSFunction> boilerplate() const { return boilerplate_; }
 
+  virtual bool IsLeaf() { return true; }
+
   virtual void Accept(AstVisitor* v);
 
  private:
@@ -1413,6 +1449,7 @@
 class ThisFunction: public Expression {
  public:
   virtual void Accept(AstVisitor* v);
+  virtual bool IsLeaf() { return true; }
 };
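
The IsLeaf predicate added throughout ast.h is structural: literals, slots, and function literals (whose bodies are not evaluated at the point of the literal) report true, object and array literals are leaves only when empty, and VariableProxy delegates to its resolved rewrite. It presumably serves the new DefinitionInfo data-flow fields, since a leaf has no evaluated subexpressions to order. A hedged sketch of how a pass could exploit it; the visitor shape is hypothetical:

    // Sketch: a data-flow walk can cut off at leaves because a leaf
    // contributes no evaluated subexpressions. Expression mirrors the
    // ast.h additions; the Analyze functions are hypothetical.
    struct Expression {
      virtual ~Expression() {}
      virtual bool IsLeaf() { return false; }
    };

    static void AnalyzeSubexpressions(Expression* expr) { (void)expr; }

    static void Analyze(Expression* expr) {
      if (expr->IsLeaf()) return;   // Nothing is evaluated underneath.
      AnalyzeSubexpressions(expr);  // Otherwise recurse in eval order.
    }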
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 78d0995..a7cf421 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -192,116 +192,6 @@
 }
 
 
-// Pending fixups are code positions that refer to builtin code
-// objects that were not available at the time the code was generated.
-// The pending list is processed whenever an environment has been
-// created.
-class PendingFixups : public AllStatic {
- public:
-  static void Add(Code* code, MacroAssembler* masm);
-  static bool Process(Handle<JSBuiltinsObject> builtins);
-
-  static void Iterate(ObjectVisitor* v);
-
- private:
-  static List<Object*> code_;
-  static List<const char*> name_;
-  static List<int> pc_;
-  static List<uint32_t> flags_;
-
-  static void Clear();
-};
-
-
-List<Object*> PendingFixups::code_(0);
-List<const char*> PendingFixups::name_(0);
-List<int> PendingFixups::pc_(0);
-List<uint32_t> PendingFixups::flags_(0);
-
-
-void PendingFixups::Add(Code* code, MacroAssembler* masm) {
-  // Note this code is not only called during bootstrapping.
-  List<MacroAssembler::Unresolved>* unresolved = masm->unresolved();
-  int n = unresolved->length();
-  for (int i = 0; i < n; i++) {
-    const char* name = unresolved->at(i).name;
-    code_.Add(code);
-    name_.Add(name);
-    pc_.Add(unresolved->at(i).pc);
-    flags_.Add(unresolved->at(i).flags);
-    LOG(StringEvent("unresolved", name));
-  }
-}
-
-
-bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
-  HandleScope scope;
-  // NOTE: Extra fixups may be added to the list during the iteration
-  // due to lazy compilation of functions during the processing. Do not
-  // cache the result of getting the length of the code list.
-  for (int i = 0; i < code_.length(); i++) {
-    const char* name = name_[i];
-    uint32_t flags = flags_[i];
-    Handle<String> symbol = Factory::LookupAsciiSymbol(name);
-    Object* o = builtins->GetProperty(*symbol);
-#ifdef DEBUG
-    if (!o->IsJSFunction()) {
-      V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
-    }
-#endif
-    Handle<SharedFunctionInfo> shared(JSFunction::cast(o)->shared());
-    // Make sure the number of parameters match the formal parameter count.
-    int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
-    USE(argc);
-    ASSERT(shared->formal_parameter_count() == argc);
-    // Do lazy compilation if necessary and check for stack overflows.
-    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) {
-      Clear();
-      return false;
-    }
-    Code* code = Code::cast(code_[i]);
-    Address pc = code->instruction_start() + pc_[i];
-    RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
-    bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
-    if (use_code_object) {
-      target.set_target_object(shared->code());
-    } else {
-      target.set_target_address(shared->code()->instruction_start());
-    }
-    LOG(StringEvent("resolved", name));
-  }
-  Clear();
-
-  // TODO(1240818): We should probably try to avoid doing this for all
-  // the V8 builtin JS files. It should only happen after running
-  // runtime.js - just like there shouldn't be any fixups left after
-  // that.
-  for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
-    Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
-    Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
-    JSFunction* function = JSFunction::cast(builtins->GetProperty(*name));
-    builtins->set_javascript_builtin(id, function);
-  }
-
-  return true;
-}
-
-
-void PendingFixups::Clear() {
-  code_.Clear();
-  name_.Clear();
-  pc_.Clear();
-  flags_.Clear();
-}
-
-
-void PendingFixups::Iterate(ObjectVisitor* v) {
-  if (!code_.is_empty()) {
-    v->VisitPointers(&code_[0], &code_[0] + code_.length());
-  }
-}
-
-
 class Genesis BASE_EMBEDDED {
  public:
   Genesis(Handle<Object> global_object,
@@ -338,6 +228,7 @@
   bool InstallExtension(const char* name);
   bool InstallExtension(v8::RegisteredExtension* current);
   bool InstallSpecialObjects();
+  bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
                           Handle<ObjectTemplateInfo> object_template);
   bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
@@ -379,15 +270,6 @@
   v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
   v->Synchronize("Extensions");
-  PendingFixups::Iterate(v);
-  v->Synchronize("PendingFixups");
-}
-
-
-// While setting up the environment, we collect code positions that
-// need to be patched before we can run any code in the environment.
-void Bootstrapper::AddFixup(Code* code, MacroAssembler* masm) {
-  PendingFixups::Add(code, masm);
 }
 
 
@@ -841,11 +723,11 @@
 #ifdef DEBUG
     LookupResult lookup;
     result->LocalLookup(Heap::callee_symbol(), &lookup);
-    ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::arguments_callee_index);
 
     result->LocalLookup(Heap::length_symbol(), &lookup);
-    ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::arguments_length_index);
 
     ASSERT(result->map()->inobject_properties() > Heap::arguments_callee_index);
@@ -942,7 +824,8 @@
     ASSERT(source->IsAsciiRepresentation());
     Handle<String> script_name = Factory::NewStringFromUtf8(name);
     boilerplate =
-        Compiler::Compile(source, script_name, 0, 0, extension, NULL);
+        Compiler::Compile(source, script_name, 0, 0, extension, NULL,
+                          Handle<String>::null());
     if (boilerplate.is_null()) return false;
     cache->Add(name, boilerplate);
   }
@@ -968,8 +851,7 @@
   Handle<Object> result =
       Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
   if (has_pending_exception) return false;
-  return PendingFixups::Process(
-      Handle<JSBuiltinsObject>(Top::context()->builtins()));
+  return true;
 }
 
 
@@ -989,7 +871,6 @@
   INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
   INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
   INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
-  INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
   INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
   INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
   INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1176,6 +1057,10 @@
          i < Natives::GetBuiltinsCount();
          i++) {
       if (!CompileBuiltin(i)) return false;
+      // TODO(ager): We really only need to install the JS builtin
+      // functions on the builtins object after compiling and running
+      // runtime.js.
+      if (!InstallJSBuiltins(builtins)) return false;
     }
 
     // Setup natives with lazy loading.
@@ -1377,6 +1262,22 @@
 }
 
 
+bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
+  HandleScope scope;
+  for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+    Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+    Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
+    Handle<JSFunction> function
+        = Handle<JSFunction>(JSFunction::cast(builtins->GetProperty(*name)));
+    builtins->set_javascript_builtin(id, *function);
+    Handle<SharedFunctionInfo> shared
+        = Handle<SharedFunctionInfo>(function->shared());
+    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+  }
+  return true;
+}
+
+
 bool Genesis::ConfigureGlobalObjects(
     v8::Handle<v8::ObjectTemplate> global_proxy_template) {
   Handle<JSObject> global_proxy(
@@ -1451,7 +1352,7 @@
           LookupResult result;
           to->LocalLookup(descs->GetKey(i), &result);
           // If the property is already there we skip it
-          if (result.IsValid()) continue;
+          if (result.IsProperty()) continue;
           HandleScope inner;
           Handle<DescriptorArray> inst_descs =
               Handle<DescriptorArray>(to->map()->instance_descriptors());
@@ -1488,7 +1389,7 @@
         // If the property is already there we skip it.
         LookupResult result;
         to->LocalLookup(String::cast(raw_key), &result);
-        if (result.IsValid()) continue;
+        if (result.IsProperty()) continue;
         // Set the property.
         Handle<String> key = Handle<String>(String::cast(raw_key));
         Handle<Object> value = Handle<Object>(properties->ValueAt(i));
@@ -1572,25 +1473,33 @@
 void Genesis::BuildSpecialFunctionTable() {
   HandleScope scope;
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-  // Add special versions for Array.prototype.pop and push.
+  // Add special versions for some Array.prototype functions.
   Handle<JSFunction> function =
       Handle<JSFunction>(
           JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
   Handle<JSObject> visible_prototype =
       Handle<JSObject>(JSObject::cast(function->prototype()));
-  // Remember to put push and pop on the hidden prototype if it's there.
-  Handle<JSObject> push_and_pop_prototype;
+  // Remember to put those specializations on the hidden prototype if present.
+  Handle<JSObject> special_prototype;
   Handle<Object> superproto(visible_prototype->GetPrototype());
   if (superproto->IsJSObject() &&
       JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
-    push_and_pop_prototype = Handle<JSObject>::cast(superproto);
+    special_prototype = Handle<JSObject>::cast(superproto);
   } else {
-    push_and_pop_prototype = visible_prototype;
+    special_prototype = visible_prototype;
   }
-  AddSpecialFunction(push_and_pop_prototype, "pop",
+  AddSpecialFunction(special_prototype, "pop",
                      Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
-  AddSpecialFunction(push_and_pop_prototype, "push",
+  AddSpecialFunction(special_prototype, "push",
                      Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
+  AddSpecialFunction(special_prototype, "shift",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayShift)));
+  AddSpecialFunction(special_prototype, "unshift",
+                     Handle<Code>(Builtins::builtin(Builtins::ArrayUnshift)));
+  AddSpecialFunction(special_prototype, "slice",
+                     Handle<Code>(Builtins::builtin(Builtins::ArraySlice)));
+  AddSpecialFunction(special_prototype, "splice",
+                     Handle<Code>(Builtins::builtin(Builtins::ArraySplice)));
 }
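
With PendingFixups removed, calls from generated code to JS builtins no longer rely on a list of code positions patched after the fact; InstallJSBuiltins instead resolves each Builtins::JavaScript id by name on the builtins object and compiles it eagerly after every native script. The control flow in miniature, with illustrative stubs standing in for the Genesis helpers:

    #include <assert.h>

    // Sketch of the eager-resolution flow that replaces pending fixups:
    // look each JS builtin up by name and compile it up front, so no
    // code position needs later patching. All three helpers are stubs.
    static const int kBuiltinCount = 3;
    static bool compiled[kBuiltinCount];

    static bool EnsureCompiled(int id) { compiled[id] = true; return true; }
    static void Register(int id) { assert(compiled[id]); }

    static bool InstallJSBuiltins() {
      for (int id = 0; id < kBuiltinCount; id++) {
        if (!EnsureCompiled(id)) return false;  // Propagate failure.
        Register(id);  // set_javascript_builtin(id, function) above.
      }
      return true;
    }

    int main() { return InstallJSBuiltins() ? 0 : 1; }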
 
 
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 7cd3a2b..cc775b2 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -59,9 +59,6 @@
                                  Handle<JSFunction>* handle);
   static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
 
-  // Append code that needs fixup at the end of boot strapping.
-  static void AddFixup(Code* code, MacroAssembler* masm);
-
   // Tells whether bootstrapping is active.
   static bool IsActive();
 
diff --git a/src/builtins.cc b/src/builtins.cc
index db0770f..8e88c28 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -168,28 +168,6 @@
 // ----------------------------------------------------------------------------
 
 
-Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
-  Code* code = Builtins::builtin(Builtins::Illegal);
-  *resolved = false;
-
-  if (Top::context() != NULL) {
-    Object* object = Top::builtins()->javascript_builtin(id);
-    if (object->IsJSFunction()) {
-      Handle<SharedFunctionInfo> shared(JSFunction::cast(object)->shared());
-      // Make sure the number of parameters match the formal parameter count.
-      ASSERT(shared->formal_parameter_count() ==
-             Builtins::GetArgumentsCount(id));
-      if (EnsureCompiled(shared, CLEAR_EXCEPTION)) {
-        code = shared->code();
-        *resolved = true;
-      }
-    }
-  }
-
-  return Handle<Code>(code);
-}
-
-
 BUILTIN(Illegal) {
   UNREACHABLE();
   return Heap::undefined_value();  // Make compiler happy.
@@ -268,19 +246,19 @@
   JSArray* array = JSArray::cast(*args.receiver());
   ASSERT(array->HasFastElements());
 
-  // Make sure we have space for the elements.
   int len = Smi::cast(array->length())->value();
+  int to_add = args.length() - 1;
+  if (to_add == 0) {
+    return Smi::FromInt(len);
+  }
+  // Currently fixed arrays cannot grow too big, so
+  // we should never hit this case.
+  ASSERT(to_add <= (Smi::kMaxValue - len));
 
-  // Set new length.
-  int new_length = len + args.length() - 1;
+  int new_length = len + to_add;
   FixedArray* elms = FixedArray::cast(array->elements());
 
-  if (new_length <= elms->length()) {
-    // Backing storage has extra space for the provided values.
-    for (int index = 0; index < args.length() - 1; index++) {
-      elms->set(index + len, args[index+1]);
-    }
-  } else {
+  if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
     Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
@@ -291,16 +269,21 @@
     WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
     // Fill out the new array with old elements.
     for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
-    // Add the provided values.
-    for (int index = 0; index < args.length() - 1; index++) {
-      new_elms->set(index + len, args[index+1], mode);
-    }
-    // Set the new backing storage.
-    array->set_elements(new_elms);
+    elms = new_elms;
+    array->set_elements(elms);
   }
+
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+
+  // Add the provided values.
+  for (int index = 0; index < to_add; index++) {
+    elms->set(index + len, args[index + 1], mode);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
-  return array->length();
+  return Smi::FromInt(new_length);
 }
 
 
@@ -335,6 +318,355 @@
 }
 
 
+static Object* GetElementToMove(uint32_t index,
+                                FixedArray* elms,
+                                JSObject* prototype) {
+  Object* e = elms->get(index);
+  if (e->IsTheHole() && prototype->HasElement(index)) {
+    e = prototype->GetElement(index);
+  }
+  return e;
+}
+
+
+BUILTIN(ArrayShift) {
+  JSArray* array = JSArray::cast(*args.receiver());
+  ASSERT(array->HasFastElements());
+
+  int len = Smi::cast(array->length())->value();
+  if (len == 0) return Heap::undefined_value();
+
+  // Fetch the prototype.
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+  JSObject* prototype = JSObject::cast(array_function->prototype());
+
+  FixedArray* elms = FixedArray::cast(array->elements());
+
+  // Get first element
+  Object* first = elms->get(0);
+  if (first->IsTheHole()) {
+    first = prototype->GetElement(0);
+  }
+
+  // Shift the elements.
+  for (int i = 0; i < len - 1; i++) {
+    elms->set(i, GetElementToMove(i + 1, elms, prototype));
+  }
+  elms->set(len - 1, Heap::the_hole_value());
+
+  // Set the length.
+  array->set_length(Smi::FromInt(len - 1));
+
+  return first;
+}
+
+
+BUILTIN(ArrayUnshift) {
+  JSArray* array = JSArray::cast(*args.receiver());
+  ASSERT(array->HasFastElements());
+
+  int len = Smi::cast(array->length())->value();
+  int to_add = args.length() - 1;
+  // Note that we cannot quit early if to_add == 0, as
+  // values should still be lifted from the prototype into
+  // the array.
+
+  int new_length = len + to_add;
+  // Currently fixed arrays cannot grow too big, so
+  // we should never hit this case.
+  ASSERT(to_add <= (Smi::kMaxValue - len));
+
+  FixedArray* elms = FixedArray::cast(array->elements());
+
+  // Fetch the prototype.
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+  JSObject* prototype = JSObject::cast(array_function->prototype());
+
+  if (new_length > elms->length()) {
+    // New backing storage is needed.
+    int capacity = new_length + (new_length >> 1) + 16;
+    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+    if (obj->IsFailure()) return obj;
+
+    AssertNoAllocation no_gc;
+    FixedArray* new_elms = FixedArray::cast(obj);
+    WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
+    // Fill out the new array with old elements.
+    for (int i = 0; i < len; i++) {
+      new_elms->set(to_add + i,
+                    GetElementToMove(i, elms, prototype),
+                    mode);
+    }
+
+    elms = new_elms;
+    array->set_elements(elms);
+  } else {
+    AssertNoAllocation no_gc;
+    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+
+    // Move elements to the right
+    for (int i = 0; i < len; i++) {
+      elms->set(new_length - i - 1,
+                GetElementToMove(len - i - 1, elms, prototype),
+                mode);
+    }
+  }
+
+  // Add the provided values.
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < to_add; i++) {
+    elms->set(i, args[i + 1], mode);
+  }
+
+  // Set the length.
+  array->set_length(Smi::FromInt(new_length));
+  return Smi::FromInt(new_length);
+}
+
+
+static Object* CallJsBuiltin(const char* name,
+                             BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+  HandleScope handleScope;
+
+  Handle<Object> js_builtin =
+      GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
+                  name);
+  ASSERT(js_builtin->IsJSFunction());
+  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+  Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
+  int n_args = args.length() - 1;
+  for (int i = 0; i < n_args; i++) {
+    argv[i] = &args[i + 1];
+  }
+  bool pending_exception = false;
+  Handle<Object> result = Execution::Call(function,
+                                          args.receiver(),
+                                          n_args,
+                                          argv.start(),
+                                          &pending_exception);
+  if (pending_exception) return Failure::Exception();
+  return *result;
+}
+
+
+BUILTIN(ArraySlice) {
+  JSArray* array = JSArray::cast(*args.receiver());
+  ASSERT(array->HasFastElements());
+
+  int len = Smi::cast(array->length())->value();
+
+  int n_arguments = args.length() - 1;
+
+  // Note the carefully chosen defaults: if an argument is missing,
+  // it is undefined, which gets converted to 0 for relativeStart
+  // and to len for relativeEnd.
+  int relativeStart = 0;
+  int relativeEnd = len;
+  if (n_arguments > 0) {
+    Object* arg1 = args[1];
+    if (arg1->IsSmi()) {
+      relativeStart = Smi::cast(arg1)->value();
+    } else if (!arg1->IsUndefined()) {
+      return CallJsBuiltin("ArraySlice", args);
+    }
+    if (n_arguments > 1) {
+      Object* arg2 = args[2];
+      if (arg2->IsSmi()) {
+        relativeEnd = Smi::cast(arg2)->value();
+      } else if (!arg2->IsUndefined()) {
+        return CallJsBuiltin("ArraySlice", args);
+      }
+    }
+  }
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
+  int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
+                              : Min(relativeStart, len);
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
+  int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
+                                : Min(relativeEnd, len);
+
+  // Calculate the length of the result array.
+  int result_len = final - k;
+  if (result_len < 0) {
+    result_len = 0;
+  }
+
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+  Object* result = Heap::AllocateJSObject(array_function);
+  if (result->IsFailure()) return result;
+  JSArray* result_array = JSArray::cast(result);
+
+  result = Heap::AllocateFixedArrayWithHoles(result_len);
+  if (result->IsFailure()) return result;
+  FixedArray* result_elms = FixedArray::cast(result);
+
+  FixedArray* elms = FixedArray::cast(array->elements());
+
+  // Fetch the prototype.
+  JSObject* prototype = JSObject::cast(array_function->prototype());
+
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
+
+  // Fill newly created array.
+  for (int i = 0; i < result_len; i++) {
+    result_elms->set(i,
+                     GetElementToMove(k + i, elms, prototype),
+                     mode);
+  }
+
+  // Set elements.
+  result_array->set_elements(result_elms);
+
+  // Set the length.
+  result_array->set_length(Smi::FromInt(result_len));
+  return result_array;
+}
+
+
+BUILTIN(ArraySplice) {
+  JSArray* array = JSArray::cast(*args.receiver());
+  ASSERT(array->HasFastElements());
+
+  int len = Smi::cast(array->length())->value();
+
+  int n_arguments = args.length() - 1;
+
+  // SpiderMonkey and JSC return undefined in the case where no
+  // arguments are given instead of using the implicit undefined
+  // arguments.  This does not follow ECMA-262, but we do the same for
+  // compatibility.
+  // TraceMonkey follows ECMA-262 though.
+  if (n_arguments == 0) {
+    return Heap::undefined_value();
+  }
+
+  int relativeStart = 0;
+  Object* arg1 = args[1];
+  if (arg1->IsSmi()) {
+    relativeStart = Smi::cast(arg1)->value();
+  } else if (!arg1->IsUndefined()) {
+    return CallJsBuiltin("ArraySplice", args);
+  }
+  int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
+                                        : Min(relativeStart, len);
+
+  // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
+  // given differently from when an undefined delete count is given.
+  // This does not follow ECMA-262, but we do the same for
+  // compatibility.
+  int deleteCount = len;
+  if (n_arguments > 1) {
+    Object* arg2 = args[2];
+    if (arg2->IsSmi()) {
+      deleteCount = Smi::cast(arg2)->value();
+    } else {
+      return CallJsBuiltin("ArraySplice", args);
+    }
+  }
+  int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
+
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+
+  // Allocate result array.
+  Object* result = Heap::AllocateJSObject(array_function);
+  if (result->IsFailure()) return result;
+  JSArray* result_array = JSArray::cast(result);
+
+  result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
+  if (result->IsFailure()) return result;
+  FixedArray* result_elms = FixedArray::cast(result);
+
+  FixedArray* elms = FixedArray::cast(array->elements());
+
+  // Fetch the prototype.
+  JSObject* prototype = JSObject::cast(array_function->prototype());
+
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
+
+  // Fill newly created array.
+  for (int k = 0; k < actualDeleteCount; k++) {
+    result_elms->set(k,
+                     GetElementToMove(actualStart + k, elms, prototype),
+                     mode);
+  }
+
+  // Set elements.
+  result_array->set_elements(result_elms);
+
+  // Set the length.
+  result_array->set_length(Smi::FromInt(actualDeleteCount));
+
+  int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
+
+  int new_length = len - actualDeleteCount + itemCount;
+
+  mode = elms->GetWriteBarrierMode(no_gc);
+  if (itemCount < actualDeleteCount) {
+    // Shrink the array.
+    for (int k = actualStart; k < (len - actualDeleteCount); k++) {
+      elms->set(k + itemCount,
+                GetElementToMove(k + actualDeleteCount, elms, prototype),
+                mode);
+    }
+
+    for (int k = len; k > new_length; k--) {
+      elms->set(k - 1, Heap::the_hole_value());
+    }
+  } else if (itemCount > actualDeleteCount) {
+    // Currently fixed arrays cannot grow too big, so
+    // we should never hit this case.
+    ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
+
+    FixedArray* source_elms = elms;
+
+    // Check if the array needs to grow.
+    if (new_length > elms->length()) {
+      // New backing storage is needed.
+      int capacity = new_length + (new_length >> 1) + 16;
+      Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+      if (obj->IsFailure()) return obj;
+
+      FixedArray* new_elms = FixedArray::cast(obj);
+      mode = new_elms->GetWriteBarrierMode(no_gc);
+
+      // Copy the part before actualStart as is.
+      for (int k = 0; k < actualStart; k++) {
+        new_elms->set(k, elms->get(k), mode);
+      }
+
+      source_elms = elms;
+      elms = new_elms;
+      array->set_elements(elms);
+    }
+
+    for (int k = len - actualDeleteCount; k > actualStart; k--) {
+      elms->set(k + itemCount - 1,
+                GetElementToMove(k + actualDeleteCount - 1,
+                                 source_elms,
+                                 prototype),
+                mode);
+    }
+  }
+
+  for (int k = actualStart; k < actualStart + itemCount; k++) {
+    elms->set(k, args[3 + k - actualStart], mode);
+  }
+
+  // Set the length.
+  array->set_length(Smi::FromInt(new_length));
+
+  return result_array;
+}
+
+
 // -----------------------------------------------------------------------------
 //
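
ArraySlice and ArraySplice clamp their Smi arguments exactly as ECMA-262 prescribes: a negative index counts from the end and is clamped at 0, a non-negative one is clamped at len, and anything that is neither a Smi nor undefined bails out to the JavaScript implementation via CallJsBuiltin. GetElementToMove additionally reads through holes to the Array prototype, so elements inherited via the prototype chain survive the in-place moves. The clamping in isolation, with a worked case mirroring the step-6/step-8 computations above:

    #include <assert.h>

    static int Max(int a, int b) { return a > b ? a : b; }
    static int Min(int a, int b) { return a < b ? a : b; }

    // ECMA-262 3rd ed., Section 15.4.4.10, steps 6 and 8: clamp a
    // relative index into [0, len], counting negatives from the end.
    static int ClampIndex(int relative, int len) {
      return (relative < 0) ? Max(len + relative, 0) : Min(relative, len);
    }

    int main() {
      assert(ClampIndex(-2, 5) == 3);  // [1,2,3,4,5].slice(-2): from 3.
      assert(ClampIndex(7, 5) == 5);   // Start past length: empty result.
      assert(ClampIndex(-9, 5) == 0);  // Far-negative start clamps to 0.
      return 0;
    }
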
 
@@ -474,6 +806,76 @@
 }
 
 
+#ifdef DEBUG
+
+static void VerifyTypeCheck(Handle<JSObject> object,
+                            Handle<JSFunction> function) {
+  FunctionTemplateInfo* info =
+      FunctionTemplateInfo::cast(function->shared()->function_data());
+  if (info->signature()->IsUndefined()) return;
+  SignatureInfo* signature = SignatureInfo::cast(info->signature());
+  Object* receiver_type = signature->receiver();
+  if (receiver_type->IsUndefined()) return;
+  FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
+  ASSERT(object->IsInstanceOf(type));
+}
+
+#endif
+
+
+BUILTIN(FastHandleApiCall) {
+  ASSERT(!CalledAsConstructor());
+  const bool is_construct = false;
+
+  // We expect four more arguments: function, callback, call data, and holder.
+  const int args_length = args.length() - 4;
+  ASSERT(args_length >= 0);
+
+  Handle<JSFunction> function = args.at<JSFunction>(args_length);
+  Object* callback_obj = args[args_length + 1];
+  Handle<Object> data_handle = args.at<Object>(args_length + 2);
+  Handle<JSObject> checked_holder = args.at<JSObject>(args_length + 3);
+
+#ifdef DEBUG
+  VerifyTypeCheck(checked_holder, function);
+#endif
+
+  v8::Local<v8::Object> holder = v8::Utils::ToLocal(checked_holder);
+  v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
+  v8::InvocationCallback callback =
+      v8::ToCData<v8::InvocationCallback>(callback_obj);
+  v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
+
+  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
+      data,
+      holder,
+      callee,
+      is_construct,
+      reinterpret_cast<void**>(&args[0] - 1),
+      args_length - 1);
+
+  HandleScope scope;
+  Object* result;
+  v8::Handle<v8::Value> value;
+  {
+    // Leaving JavaScript.
+    VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+    state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
+    value = callback(new_args);
+  }
+  if (value.IsEmpty()) {
+    result = Heap::undefined_value();
+  } else {
+    result = *reinterpret_cast<Object**>(*value);
+  }
+
+  RETURN_IF_SCHEDULED_EXCEPTION();
+  return result;
+}
+
+
 // Helper function to handle calls to non-function objects created through the
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
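
FastHandleApiCall invokes the extension's v8::InvocationCallback directly: the stub-cache code pushes the trailing function/callback/data/holder arguments, the receiver signature has already been checked (hence VerifyTypeCheck only in debug mode), and the call runs under VMState(EXTERNAL) so profiles attribute the time to external code. On the receiving end, a callback has the classic pre-3.x shape; a sketch, with registration through FunctionTemplate elided:

    #include <v8.h>

    // A v8::InvocationCallback as consumed by FastHandleApiCall.
    // The signature matches the API of this era; hooking it up via
    // FunctionTemplate::New(Echo) is left out of the sketch.
    static v8::Handle<v8::Value> Echo(const v8::Arguments& args) {
      if (args.Length() < 1) return v8::Undefined();
      return args[0];  // Hand the first argument straight back.
    }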
@@ -657,6 +1059,10 @@
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }
 
+static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateIndexedInterceptor(masm);
+}
+
 
 static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
   StoreIC::GenerateInitialize(masm);
@@ -668,15 +1074,16 @@
 }
 
 
-static void Generate_StoreIC_ExtendStorage(MacroAssembler* masm) {
-  StoreIC::GenerateExtendStorage(masm);
-}
-
 static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
   StoreIC::GenerateMegamorphic(masm);
 }
 
 
+static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
+  StoreIC::GenerateArrayLength(masm);
+}
+
+
 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateGeneric(masm);
 }
@@ -720,11 +1127,6 @@
 }
 
 
-static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateExtendStorage(masm);
-}
-
-
 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMiss(masm);
 }
@@ -869,9 +1271,6 @@
           v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
         }
       }
-      // Add any unresolved jumps or calls to the fixup list in the
-      // bootstrapper.
-      Bootstrapper::AddFixup(Code::cast(code), &masm);
       // Log the event and add the code to the builtins array.
       LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
                           Code::cast(code), functions[i].s_name));
diff --git a/src/builtins.h b/src/builtins.h
index 418948f..595e9a4 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -48,8 +48,13 @@
                                                                     \
   V(ArrayPush, NO_EXTRA_ARGUMENTS)                                  \
   V(ArrayPop, NO_EXTRA_ARGUMENTS)                                   \
+  V(ArrayShift, NO_EXTRA_ARGUMENTS)                                 \
+  V(ArrayUnshift, NO_EXTRA_ARGUMENTS)                               \
+  V(ArraySlice, NO_EXTRA_ARGUMENTS)                                 \
+  V(ArraySplice, NO_EXTRA_ARGUMENTS)                                \
                                                                     \
   V(HandleApiCall, NEEDS_CALLED_FUNCTION)                           \
+  V(FastHandleApiCall, NO_EXTRA_ARGUMENTS)                          \
   V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION)                  \
   V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS)                    \
   V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
@@ -69,9 +74,6 @@
   V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)                   \
   V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)                   \
                                                                           \
-  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)                   \
-  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)                   \
-                                                                          \
   V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)                   \
   V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)                  \
   V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)                     \
@@ -91,8 +93,10 @@
   V(KeyedLoadIC_ExternalIntArray,           KEYED_LOAD_IC, MEGAMORPHIC)   \
   V(KeyedLoadIC_ExternalUnsignedIntArray,   KEYED_LOAD_IC, MEGAMORPHIC)   \
   V(KeyedLoadIC_ExternalFloatArray,         KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_IndexedInterceptor,         KEYED_LOAD_IC, MEGAMORPHIC)   \
                                                                           \
   V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)                  \
+  V(StoreIC_ArrayLength,        STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
                                                                           \
   V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
diff --git a/src/checks.h b/src/checks.h
index 3b0c851..eeb748b 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -125,7 +125,9 @@
                                      const char* expected,
                                      const char* value_source,
                                      const char* value) {
-  if (strcmp(expected, value) != 0) {
+  if ((expected == NULL && value != NULL) ||
+      (expected != NULL && value == NULL) ||
+      (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
              expected_source, value_source, expected, value);
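
The old CheckEqualsHelper passed both operands straight to strcmp, which is undefined behavior as soon as a CHECK_EQ operand is NULL; the guarded version only calls strcmp when both sides are non-NULL and treats NULL == NULL as equal. The predicate condensed into a self-contained sketch, not the macro itself:

    #include <assert.h>
    #include <string.h>

    // Equal iff both NULL, or both non-NULL with identical contents;
    // strcmp is never handed a NULL (which would be undefined behavior).
    static bool CStringEquals(const char* a, const char* b) {
      if (a == NULL || b == NULL) return a == b;
      return strcmp(a, b) == 0;
    }

    int main() {
      assert(CStringEquals(NULL, NULL));
      assert(!CStringEquals(NULL, "x"));
      assert(CStringEquals("abc", "abc"));
      return 0;
    }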
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 09581aa..4d0fd29 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -31,6 +31,7 @@
 #include "code-stubs.h"
 #include "factory.h"
 #include "macro-assembler.h"
+#include "oprofile-agent.h"
 
 namespace v8 {
 namespace internal {
@@ -60,8 +61,12 @@
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
   code->set_major_key(MajorKey());
 
-  // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(code, masm);
+#ifdef ENABLE_OPROFILE_AGENT
+  // Register the generated stub with the OPROFILE agent.
+  OProfileAgent::CreateNativeCodeRegion(GetName(),
+                                        code->instruction_start(),
+                                        code->instruction_size());
+#endif
 
   LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
   Counters::total_stubs_code_size.Increment(code->instruction_size());
@@ -149,13 +154,16 @@
 }
 
 
-const char* CodeStub::MajorName(CodeStub::Major major_key) {
+const char* CodeStub::MajorName(CodeStub::Major major_key,
+                                bool allow_unknown_keys) {
   switch (major_key) {
 #define DEF_CASE(name) case name: return #name;
     CODE_STUB_LIST(DEF_CASE)
 #undef DEF_CASE
     default:
-      UNREACHABLE();
+      if (!allow_unknown_keys) {
+        UNREACHABLE();
+      }
       return NULL;
   }
 }
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 16267f6..3901a64 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -55,6 +55,7 @@
   V(CounterOp)                           \
   V(ArgumentsAccess)                     \
   V(RegExpExec)                          \
+  V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
   V(DebuggerStatement)
@@ -100,7 +101,7 @@
   static int MinorKeyFromKey(uint32_t key) {
     return MinorKeyBits::decode(key);
   };
-  static const char* MajorName(Major major_key);
+  static const char* MajorName(Major major_key, bool allow_unknown_keys);
 
   virtual ~CodeStub() {}
 
@@ -138,7 +139,7 @@
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
   // Returns a name for logging/debugging purposes.
-  virtual const char* GetName() { return MajorName(MajorKey()); }
+  virtual const char* GetName() { return MajorName(MajorKey(), false); }
 
 #ifdef DEBUG
   virtual void Print() { PrintF("%s\n", GetName()); }
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index bee237d..da8cbf7 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -30,6 +30,7 @@
 #define V8_CODEGEN_INL_H_
 
 #include "codegen.h"
+#include "compiler.h"
 #include "register-allocator-inl.h"
 
 #if V8_TARGET_ARCH_IA32
@@ -38,6 +39,8 @@
 #include "x64/codegen-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -46,42 +49,8 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
-// -----------------------------------------------------------------------------
-// Support for "structured" code comments.
-//
-// By selecting matching brackets in disassembler output,
-// code segments can be identified more easily.
-
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
-    __ RecordComment(msg);
-  }
-
-  ~Comment() {
-    if (msg_[0] == '[') __ RecordComment("]");
-  }
-
- private:
-  MacroAssembler* masm_;
-  const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler*, const char*)  {}
-};
-
-#endif  // DEBUG
-
-#undef __
-
+Handle<Script> CodeGenerator::script() { return info_->script(); }
+bool CodeGenerator::is_eval() { return info_->is_eval(); }
 
 } }  // namespace v8::internal
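
The Comment class that moves out of codegen-inl.h (declaration now in codegen.h, bodies in codegen.cc, per the hunks below) implements the bracketed-comment convention: the constructor records the message and, if it opened with '[', the destructor records a matching "]", so code segments can be folded by matching brackets in disassembler output. A self-contained sketch of the RAII convention, with printf standing in for MacroAssembler::RecordComment:

    #include <stdio.h>

    // RAII bracketed comments: "[ ..." on entry, "]" on scope exit.
    class Comment {
     public:
      explicit Comment(const char* msg) : msg_(msg) { printf("%s\n", msg); }
      ~Comment() { if (msg_[0] == '[') printf("]\n"); }
     private:
      const char* msg_;
    };

    int main() {
      Comment outer("[ ForStatement");  // Prints "[ ForStatement".
      { Comment inner("[ Body"); }      // Prints "[ Body" then "]".
      return 0;                         // Outer destructor prints "]".
    }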
 
diff --git a/src/codegen.cc b/src/codegen.cc
index cb6089b..01ee6d0 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
+#include "liveedit.h"
 #include "oprofile-agent.h"
 #include "prettyprinter.h"
 #include "register-allocator-inl.h"
@@ -42,6 +43,24 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm_)
+
+#ifdef DEBUG
+
+Comment::Comment(MacroAssembler* masm, const char* msg)
+    : masm_(masm), msg_(msg) {
+  __ RecordComment(msg);
+}
+
+
+Comment::~Comment() {
+  if (msg_[0] == '[') __ RecordComment("]");
+}
+
+#endif  // DEBUG
+
+#undef __
+
 
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
 
@@ -126,7 +145,7 @@
 }
 
 
-void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
@@ -147,60 +166,61 @@
 
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    fun->name()->ShortPrint();
+    info->function()->name()->ShortPrint();
     PrintF(" ***\n");
   }
 
   if (print_source) {
-    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
+    PrintF("--- Source from AST ---\n%s\n",
+           PrettyPrinter().PrintProgram(info->function()));
   }
 
   if (print_ast) {
-    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+    PrintF("--- AST ---\n%s\n",
+           AstPrinter().PrintProgram(info->function()));
   }
 
   if (print_json_ast) {
     JsonAstBuilder builder;
-    PrintF("%s", builder.BuildProgram(fun));
+    PrintF("%s", builder.BuildProgram(info->function()));
   }
 #endif  // DEBUG
 }
 
 
-Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
-                                             MacroAssembler* masm,
+Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
                                              Code::Flags flags,
-                                             Handle<Script> script) {
+                                             CompilationInfo* info) {
   // Allocate and install the code.
   CodeDesc desc;
   masm->GetCode(&desc);
-  ZoneScopeInfo sinfo(fun->scope());
+  ZoneScopeInfo sinfo(info->scope());
   Handle<Code> code =
       Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
 
-  // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(*code, masm);
-
 #ifdef ENABLE_DISASSEMBLER
   bool print_code = Bootstrapper::IsActive()
       ? FLAG_print_builtin_code
       : FLAG_print_code;
   if (print_code) {
     // Print the source code if available.
+    Handle<Script> script = info->script();
+    FunctionLiteral* function = info->function();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(fun->start_position());
+      stream.Seek(function->start_position());
       // fun->end_position() points to the last character in the stream. We
       // need to compensate by adding one to calculate the length.
-      int source_len = fun->end_position() - fun->start_position() + 1;
+      int source_len =
+          function->end_position() - function->start_position() + 1;
       for (int i = 0; i < source_len; i++) {
         if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble(*fun->name()->ToCString());
+    code->Disassemble(*function->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -214,21 +234,21 @@
 // Generate the code. Takes a function literal, generates code for it, assemble
 // all the pieces into a Code object. This function is only to be called by
 // the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
-                                     Handle<Script> script,
-                                     bool is_eval,
-                                     CompilationInfo* info) {
+Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
+  LiveEditFunctionTracker live_edit_tracker(info->function());
+  Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_old_codegen_source_size.Increment(len);
   }
-  MakeCodePrologue(fun);
+  MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, PRIMARY, info);
+  live_edit_tracker.RecordFunctionScope(info->function()->scope());
+  cgen.Generate(info, PRIMARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -236,7 +256,9 @@
 
   InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+  Handle<Code> result = MakeCodeEpilogue(cgen.masm(), flags, info);
+  live_edit_tracker.RecordFunctionCode(result);
+  return result;
 }
 
 
@@ -355,6 +377,7 @@
   {&CodeGenerator::GenerateSubString, "_SubString"},
   {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
   {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
+  {&CodeGenerator::GenerateNumberToString, "_NumberToString"},
 };
 
 
@@ -506,10 +529,4 @@
 }
 
 
-void DebuggerStatementStub::Generate(MacroAssembler* masm) {
-  Runtime::Function* f = Runtime::FunctionForId(Runtime::kDebugBreak);
-  masm->TailCallRuntime(ExternalReference(f), 0, f->result_size);
-}
-
-
 } }  // namespace v8::internal
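
The theme of the codegen.cc and compiler.cc hunks is a parameter-object refactoring: MakeCode, MakeCodePrologue, and MakeCodeEpilogue now take a single CompilationInfo* in place of the (literal, script, is_eval, info) tuple, so the classic, fast, and full code generators share one signature and new fields travel for free. The pattern in miniature; the field types are simplified stand-ins for the real handles:

    #include <string>

    // Parameter object: bundle values that always travel together so
    // every MakeCode-style entry point takes exactly one argument.
    struct CompilationInfo {
      std::string script;    // stands in for Handle<Script>
      std::string function;  // stands in for FunctionLiteral*
      bool is_eval;
    };

    // Before: MakeCode(literal, script, is_eval, info).
    static bool MakeCode(const CompilationInfo& info) {
      return !info.function.empty();  // Elided: run a code generator.
    }

    int main() {
      CompilationInfo info = {"script.js", "f", false};
      return MakeCode(info) ? 0 : 1;
    }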
diff --git a/src/codegen.h b/src/codegen.h
index d0be5f1..5c10cb6 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -31,6 +31,7 @@
 #include "ast.h"
 #include "code-stubs.h"
 #include "runtime.h"
+#include "number-info.h"
 
 // Include the declaration of the architecture defined class CodeGenerator.
 // The contract  to the shared code is that the the CodeGenerator is a subclass
@@ -86,6 +87,8 @@
 #include "x64/codegen-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -96,6 +99,29 @@
 namespace internal {
 
 
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler* masm, const char* msg);
+  ~Comment();
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler*, const char*)  {}
+};
+
+#endif  // DEBUG
+
+
 // Code generation can be nested.  Code generation scopes form a stack
 // of active code generators.
 class CodeGeneratorScope BASE_EMBEDDED {
@@ -390,21 +416,6 @@
 };
 
 
-// Mark the debugger statement to be recognized by debugger (by the MajorKey)
-class DebuggerStatementStub : public CodeStub {
- public:
-  DebuggerStatementStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return DebuggerStatement; }
-  int MinorKey() { return 0; }
-
-  const char* GetName() { return "DebuggerStatementStub"; }
-};
-
-
 class JSEntryStub : public CodeStub {
  public:
   JSEntryStub() { }
diff --git a/src/compiler.cc b/src/compiler.cc
index a5e1e5c..557a91e 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -38,20 +38,17 @@
 #include "rewriter.h"
 #include "scopes.h"
 #include "usage-analyzer.h"
+#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
 
 
-static Handle<Code> MakeCode(FunctionLiteral* literal,
-                             Handle<Script> script,
-                             Handle<Context> context,
-                             bool is_eval,
-                             CompilationInfo* info) {
-  ASSERT(literal != NULL);
-
+static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
+  FunctionLiteral* function = info->function();
+  ASSERT(function != NULL);
   // Rewrite the AST by introducing .result assignments where needed.
-  if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+  if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
     // Signal a stack overflow by returning a null handle.  The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -62,7 +59,7 @@
     // the top scope only contains the single lazily compiled function,
     // so this doesn't re-allocate variables repeatedly.
     HistogramTimerScope timer(&Counters::variable_allocation);
-    Scope* top = literal->scope();
+    Scope* top = info->scope();
     while (top->outer_scope() != NULL) top = top->outer_scope();
     top->AllocateVariables(context);
   }
@@ -71,12 +68,12 @@
   if (Bootstrapper::IsActive() ?
       FLAG_print_builtin_scopes :
       FLAG_print_scopes) {
-    literal->scope()->Print();
+    info->scope()->Print();
   }
 #endif
 
   // Optimize the AST.
-  if (!Rewriter::Optimize(literal)) {
+  if (!Rewriter::Optimize(function)) {
     // Signal a stack overflow by returning a null handle.  The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -98,25 +95,25 @@
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
   bool is_run_once = (shared.is_null())
-      ? literal->scope()->is_global_scope()
+      ? info->scope()->is_global_scope()
       : (shared->is_toplevel() || shared->try_full_codegen());
 
   if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
     FullCodeGenSyntaxChecker checker;
-    checker.Check(literal);
+    checker.Check(function);
     if (checker.has_supported_syntax()) {
-      return FullCodeGenerator::MakeCode(literal, script, is_eval);
+      return FullCodeGenerator::MakeCode(info);
     }
   } else if (FLAG_always_fast_compiler ||
              (FLAG_fast_compiler && !is_run_once)) {
     FastCodeGenSyntaxChecker checker;
-    checker.Check(literal, info);
+    checker.Check(info);
     if (checker.has_supported_syntax()) {
-      return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
+      return FastCodeGenerator::MakeCode(info);
     }
   }
 
-  return CodeGenerator::MakeCode(literal, script, is_eval, info);
+  return CodeGenerator::MakeCode(info);
 }
 
 
@@ -180,10 +177,8 @@
   HistogramTimerScope timer(rate);
 
   // Compile the code.
-  CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                       Handle<Object>::null(),  // No receiver.
-                       0);  // Not nested in a loop.
-  Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
+  CompilationInfo info(lit, script, is_eval);
+  Handle<Code> code = MakeCode(context, &info);
 
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
@@ -243,7 +238,8 @@
                                      Handle<Object> script_name,
                                      int line_offset, int column_offset,
                                      v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data) {
+                                     ScriptDataImpl* input_pre_data,
+                                     Handle<Object> script_data) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -277,6 +273,9 @@
       script->set_column_offset(Smi::FromInt(column_offset));
     }
 
+    script->set_data(script_data.is_null() ? Heap::undefined_value()
+                                           : *script_data);
+
     // Compile the function and add it to the cache.
     result = MakeFunction(true,
                           false,
@@ -355,7 +354,6 @@
   // Compute name, source code and script data.
   Handle<SharedFunctionInfo> shared = info->shared_info();
   Handle<String> name(String::cast(shared->name()));
-  Handle<Script> script(Script::cast(shared->script()));
 
   int start_position = shared->start_position();
   int end_position = shared->end_position();
@@ -364,7 +362,8 @@
 
   // Generate the AST for the lazily compiled function. The AST may be
   // NULL in case of parser stack overflow.
-  FunctionLiteral* lit = MakeLazyAST(script, name,
+  FunctionLiteral* lit = MakeLazyAST(info->script(),
+                                     name,
                                      start_position,
                                      end_position,
                                      is_expression);
@@ -374,6 +373,7 @@
     ASSERT(Top::has_pending_exception());
     return false;
   }
+  info->set_function(lit);
 
   // Measure how long it takes to do the lazy compilation; only take
   // the rest of the function into account to avoid overlap with the
@@ -381,11 +381,7 @@
   HistogramTimerScope timer(&Counters::compile_lazy);
 
   // Compile the code.
-  Handle<Code> code = MakeCode(lit,
-                               script,
-                               Handle<Context>::null(),
-                               false,
-                               info);
+  Handle<Code> code = MakeCode(Handle<Context>::null(), info);
 
   // Check for stack-overflow exception.
   if (code.is_null()) {
@@ -394,28 +390,12 @@
   }
 
 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation. If source information is available include script
-  // name and line number. Check explicit whether logging is enabled as finding
-  // the line number is not for free.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    Handle<String> func_name(name->length() > 0 ?
-                             *name : shared->inferred_name());
-    if (script->name()->IsString()) {
-      int line_num = GetScriptLineNumber(script, start_position) + 1;
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
-                          String::cast(script->name()), line_num));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            String::cast(script->name()),
-                                            line_num,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
-  }
+  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                     name,
+                     Handle<String>(shared->inferred_name()),
+                     start_position,
+                     info->script(),
+                     code);
 #endif
 
   // Update the shared function info with the compiled code.
@@ -450,7 +430,8 @@
   // compiled. These builtins cannot be handled lazily by the parser,
   // since we have to know if a function uses the special natives
   // syntax, which is something the parser records.
-  bool allow_lazy = literal->AllowsLazyCompilation();
+  bool allow_lazy = literal->AllowsLazyCompilation() &&
+      !LiveEditFunctionTracker::IsActive();
 
   // Generate code
   Handle<Code> code;
@@ -466,9 +447,7 @@
     // Generate code and return it.  The way that the compilation mode
     // is controlled by the command-line flags is described in
     // the static helper function MakeCode.
-    CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                         Handle<Object>::null(),  // No receiver.
-                         0);  // Not nested in a loop.
+    CompilationInfo info(literal, script, false);
 
     CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
     bool is_run_once = literal->try_full_codegen();
@@ -477,9 +456,7 @@
       FullCodeGenSyntaxChecker checker;
       checker.Check(literal);
       if (checker.has_supported_syntax()) {
-        code = FullCodeGenerator::MakeCode(literal,
-                                           script,
-                                           false);  // Not eval.
+        code = FullCodeGenerator::MakeCode(&info);
         is_compiled = true;
       }
     } else if (FLAG_always_fast_compiler ||
@@ -487,19 +464,16 @@
       // Since we are not lazily compiling we do not have a receiver to
       // specialize for.
       FastCodeGenSyntaxChecker checker;
-      checker.Check(literal, &info);
+      checker.Check(&info);
       if (checker.has_supported_syntax()) {
-        code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+        code = FastCodeGenerator::MakeCode(&info);
         is_compiled = true;
       }
     }
 
     if (!is_compiled) {
       // We fall back to the classic V8 code generator.
-      code = CodeGenerator::MakeCode(literal,
-                                     script,
-                                     false,  // Not eval.
-                                     &info);
+      code = CodeGenerator::MakeCode(&info);
     }
 
     // Check for stack-overflow exception.
@@ -509,12 +483,14 @@
     }
 
     // Function compilation complete.
-    LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
 
-#ifdef ENABLE_OPROFILE_AGENT
-    OProfileAgent::CreateNativeCodeRegion(*literal->name(),
-                                          code->instruction_start(),
-                                          code->instruction_size());
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+    LogCodeCreateEvent(Logger::FUNCTION_TAG,
+                       literal->name(),
+                       literal->inferred_name(),
+                       literal->start_position(),
+                       script,
+                       code);
 #endif
   }
 
@@ -562,4 +538,35 @@
 }
 
 
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Handle<String> name,
+                                  Handle<String> inferred_name,
+                                  int start_position,
+                                  Handle<Script> script,
+                                  Handle<Code> code) {
+  // Log the code generation. If source information is available
+  // include script name and line number. Check explicitly whether
+  // logging is enabled as finding the line number is not free.
+  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+    Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
+    if (script->name()->IsString()) {
+      int line_num = GetScriptLineNumber(script, start_position) + 1;
+      LOG(CodeCreateEvent(tag, *code, *func_name,
+                          String::cast(script->name()), line_num));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            String::cast(script->name()),
+                                            line_num,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    } else {
+      LOG(CodeCreateEvent(tag, *code, *func_name));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    }
+  }
+}
+#endif
+
 } }  // namespace v8::internal
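The new LogCodeCreateEvent helper replaces two near-identical logging blocks (lazy compile above, eager compile in Compiler::BuildBoilerplate). Its shape, as a standalone sketch with std::string standing in for the handle types: prefer the explicit name, fall back to the inferred one, and attach script/line information only when a script name exists, since computing the line number is not free.

    #include <cstdio>
    #include <string>

    // Sketch only: std::string stands in for Handle<String>.
    void LogCodeCreate(const std::string& name,
                       const std::string& inferred_name,
                       const std::string& script_name,
                       int line_number) {  // already 1-based
      const std::string& func_name = name.empty() ? inferred_name : name;
      if (!script_name.empty()) {
        std::printf("code-create %s %s:%d\n", func_name.c_str(),
                    script_name.c_str(), line_number);
      } else {
        std::printf("code-create %s\n", func_name.c_str());
      }
    }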
diff --git a/src/compiler.h b/src/compiler.h
index 19499de..6ee2246 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -28,45 +28,136 @@
 #ifndef V8_COMPILER_H_
 #define V8_COMPILER_H_
 
+#include "ast.h"
 #include "frame-element.h"
 #include "parser.h"
+#include "register-allocator.h"
 #include "zone.h"
 
 namespace v8 {
 namespace internal {
 
-// CompilationInfo encapsulates some information known at compile time.
+// CompilationInfo encapsulates some information known at compile time.  It
+// is constructed based on the resources available at compile time.
 class CompilationInfo BASE_EMBEDDED {
  public:
-  CompilationInfo(Handle<SharedFunctionInfo> shared_info,
-                  Handle<Object> receiver,
-                  int loop_nesting)
-      : shared_info_(shared_info),
-        receiver_(receiver),
+  // Lazy compilation of a JSFunction.
+  CompilationInfo(Handle<JSFunction> closure,
+                  int loop_nesting,
+                  Handle<Object> receiver)
+      : closure_(closure),
+        function_(NULL),
+        is_eval_(false),
         loop_nesting_(loop_nesting),
-        has_this_properties_(false),
-        has_globals_(false) {
+        receiver_(receiver) {
+    Initialize();
+    ASSERT(!closure_.is_null() &&
+           shared_info_.is_null() &&
+           script_.is_null());
   }
 
-  Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+  // Lazy compilation based on SharedFunctionInfo.
+  explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+      : shared_info_(shared_info),
+        function_(NULL),
+        is_eval_(false),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           !shared_info_.is_null() &&
+           script_.is_null());
+  }
 
+  // Eager compilation.
+  CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
+      : script_(script),
+        function_(literal),
+        is_eval_(is_eval),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           shared_info_.is_null() &&
+           !script_.is_null());
+  }
+
+  // We can only get a JSFunction if we actually have one.
+  Handle<JSFunction> closure() { return closure_; }
+
+  // We can get a SharedFunctionInfo from the JSFunction, or directly if
+  // we actually have one.
+  Handle<SharedFunctionInfo> shared_info() {
+    if (!closure().is_null()) {
+      return Handle<SharedFunctionInfo>(closure()->shared());
+    } else {
+      return shared_info_;
+    }
+  }
+
+  // We can always get a script.  Either we have one or we can get a shared
+  // function info.
+  Handle<Script> script() {
+    if (!script_.is_null()) {
+      return script_;
+    } else {
+      ASSERT(shared_info()->script()->IsScript());
+      return Handle<Script>(Script::cast(shared_info()->script()));
+    }
+  }
+
+  // There should always be a function literal, but it may be set after
+  // construction (for lazy compilation).
+  FunctionLiteral* function() { return function_; }
+  void set_function(FunctionLiteral* literal) {
+    ASSERT(function_ == NULL);
+    function_ = literal;
+  }
+
+  // Simple accessors.
+  bool is_eval() { return is_eval_; }
+  int loop_nesting() { return loop_nesting_; }
   bool has_receiver() { return !receiver_.is_null(); }
   Handle<Object> receiver() { return receiver_; }
 
-  int loop_nesting() { return loop_nesting_; }
-
+  // Accessors for mutable fields, possibly set by analysis passes; default
+  // values are given by Initialize.
   bool has_this_properties() { return has_this_properties_; }
   void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
 
+  bool has_global_object() {
+    return !closure().is_null() && (closure()->context()->global() != NULL);
+  }
+
+  GlobalObject* global_object() {
+    return has_global_object() ? closure()->context()->global() : NULL;
+  }
+
   bool has_globals() { return has_globals_; }
   void set_has_globals(bool flag) { has_globals_ = flag; }
 
+  // Derived accessors.
+  Scope* scope() { return function()->scope(); }
+
  private:
+  void Initialize() {
+    has_this_properties_ = false;
+    has_globals_ = false;
+  }
+
+  Handle<JSFunction> closure_;
   Handle<SharedFunctionInfo> shared_info_;
-  Handle<Object> receiver_;
+  Handle<Script> script_;
+
+  FunctionLiteral* function_;
+
+  bool is_eval_;
   int loop_nesting_;
+
+  Handle<Object> receiver_;
+
   bool has_this_properties_;
   bool has_globals_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
 
@@ -94,7 +185,8 @@
                                     Handle<Object> script_name,
                                     int line_offset, int column_offset,
                                     v8::Extension* extension,
-                                    ScriptDataImpl* script_Data);
+                                    ScriptDataImpl* pre_data,
+                                    Handle<Object> script_data);
 
   // Compile a String source within a context for Eval.
   static Handle<JSFunction> CompileEval(Handle<String> source,
@@ -119,6 +211,17 @@
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
+
+ private:
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+  static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                 Handle<String> name,
+                                 Handle<String> inferred_name,
+                                 int start_position,
+                                 Handle<Script> script,
+                                 Handle<Code> code);
+#endif
 };
 
 
diff --git a/src/contexts.h b/src/contexts.h
index 66c1575..9baf072 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -76,7 +76,6 @@
   V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
   V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
   V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
-  V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 34b7b60..67fc9ef 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -27,8 +27,8 @@
 
 
 #include <cstdio>  // NOLINT
-#include <readline/readline.h>
-#include <readline/history.h>
+#include <readline/readline.h>  // NOLINT
+#include <readline/history.h>  // NOLINT
 
 
 #include "d8.h"
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 0e30b31..5e9d217 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -33,8 +33,9 @@
 namespace internal {
 
 
-void AstLabeler::Label(FunctionLiteral* fun) {
-  VisitStatements(fun->body());
+void AstLabeler::Label(CompilationInfo* info) {
+  info_ = info;
+  VisitStatements(info_->function()->body());
 }
 
 
@@ -162,6 +163,10 @@
 
 void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
   expr->set_num(next_number_++);
+  Variable* var = expr->var();
+  if (var->is_global() && !var->is_this()) {
+    info_->set_has_globals(true);
+  }
 }
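The labeller numbers AST nodes in evaluation order and, as of this change, also flags any non-this global reference on the CompilationInfo. A standalone model of that pass over a pre-flattened node list (struct fields hypothetical):

    #include <vector>

    struct Node { int num; bool is_global; bool is_this; };

    // Sketch only: assign post-order numbers and detect global references.
    void Label(std::vector<Node>* nodes_in_eval_order, bool* has_globals) {
      int next_number = 0;
      for (size_t i = 0; i < nodes_in_eval_order->size(); i++) {
        Node& n = (*nodes_in_eval_order)[i];
        n.num = next_number++;
        if (n.is_global && !n.is_this) *has_globals = true;
      }
    }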
 
 
@@ -194,15 +199,11 @@
 void AstLabeler::VisitAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
-  if (prop != NULL) {
-    ASSERT(prop->key()->IsPropertyName());
-    VariableProxy* proxy = prop->obj()->AsVariableProxy();
-    if (proxy != NULL && proxy->var()->is_this()) {
-      has_this_properties_ = true;
-    } else {
-      Visit(prop->obj());
-    }
-  }
+  ASSERT(prop->key()->IsPropertyName());
+  VariableProxy* proxy = prop->obj()->AsVariableProxy();
+  USE(proxy);
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  info()->set_has_this_properties(true);
   Visit(expr->value());
   expr->set_num(next_number_++);
 }
@@ -214,7 +215,12 @@
 
 
 void AstLabeler::VisitProperty(Property* expr) {
-  UNREACHABLE();
+  ASSERT(expr->key()->IsPropertyName());
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  USE(proxy);
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  info()->set_has_this_properties(true);
+  expr->set_num(next_number_++);
 }
 
 
@@ -264,4 +270,292 @@
   UNREACHABLE();
 }
 
+
+ZoneList<Expression*>* VarUseMap::Lookup(Variable* var) {
+  HashMap::Entry* entry = HashMap::Lookup(var, var->name()->Hash(), true);
+  if (entry->value == NULL) {
+    entry->value = new ZoneList<Expression*>(1);
+  }
+  return reinterpret_cast<ZoneList<Expression*>*>(entry->value);
+}
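VarUseMap::Lookup is a lookup-or-create: the first query for a variable allocates its (initially empty) use list. The same pattern with standard containers, as a sketch:

    #include <unordered_map>
    #include <vector>

    struct Variable;    // stand-ins for the v8::internal types
    struct Expression;

    typedef std::unordered_map<Variable*, std::vector<Expression*> > UseMap;

    std::vector<Expression*>* Lookup(UseMap* live_vars, Variable* var) {
      return &(*live_vars)[var];  // operator[] default-constructs the list.
    }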
+
+
+void LivenessAnalyzer::Analyze(FunctionLiteral* fun) {
+  // Process the function body.
+  VisitStatements(fun->body());
+
+  // All variables are implicitly defined at the function start.
+  // Record a definition of all variables live at function entry.
+  for (HashMap::Entry* p = live_vars_.Start();
+       p != NULL;
+       p = live_vars_.Next(p)) {
+    Variable* var = reinterpret_cast<Variable*>(p->key);
+    RecordDef(var, fun);
+  }
+}
+
+
+void LivenessAnalyzer::VisitStatements(ZoneList<Statement*>* stmts) {
+  // Visit statements right-to-left.
+  for (int i = stmts->length() - 1; i >= 0; i--) {
+    Visit(stmts->at(i));
+  }
+}
+
+
+void LivenessAnalyzer::RecordUse(Variable* var, Expression* expr) {
+  ASSERT(var->is_global() || var->is_this());
+  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+  uses->Add(expr);
+}
+
+
+void LivenessAnalyzer::RecordDef(Variable* var, Expression* expr) {
+  ASSERT(var->is_global() || var->is_this());
+
+  // We do not support other expressions that can define variables.
+  ASSERT(expr->AsFunctionLiteral() != NULL);
+
+  // Add the variable to the list of defined variables.
+  if (expr->defined_vars() == NULL) {
+    expr->set_defined_vars(new ZoneList<DefinitionInfo*>(1));
+  }
+  DefinitionInfo* def = new DefinitionInfo();
+  expr->AsFunctionLiteral()->defined_vars()->Add(def);
+
+  // Compute the last use of the definition. The variable uses are
+  // inserted in reverse evaluation order, so the first element in the
+  // list of live uses is the last use.
+  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+  while (uses->length() > 0) {
+    Expression* use_site = uses->RemoveLast();
+    use_site->set_var_def(def);
+    if (uses->length() == 0) {
+      def->set_last_use(use_site);
+    }
+  }
+}
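Because statements are visited right-to-left, each variable's use list holds uses in reverse evaluation order, so the element popped last (index 0) is the last use. A runnable model of the draining loop in RecordDef:

    #include <cassert>
    #include <vector>

    // Sketch only: ints stand in for the Expression* use sites.
    int DrainAndFindLastUse(std::vector<int>* uses_reverse_order) {
      int last_use = -1;
      while (!uses_reverse_order->empty()) {
        int use_site = uses_reverse_order->back();
        uses_reverse_order->pop_back();
        if (uses_reverse_order->empty()) last_use = use_site;
      }
      return last_use;
    }

    int main() {
      std::vector<int> uses;
      uses.push_back(7);  // recorded first == last use in evaluation order
      uses.push_back(3);
      uses.push_back(1);
      assert(DrainAndFindLastUse(&uses) == 7);
      return 0;
    }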
+
+
+// Visitor functions for live variable analysis.
+void LivenessAnalyzer::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+
+void LivenessAnalyzer::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void LivenessAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Do nothing.
+}
+
+
+void LivenessAnalyzer::VisitIfStatement(IfStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitContinueStatement(ContinueStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWithExitStatement(WithExitStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitForStatement(ForStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitConditional(Conditional* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitVariableProxy(VariableProxy* expr) {
+  Variable* var = expr->var();
+  ASSERT(var->is_global());
+  ASSERT(!var->is_this());
+  RecordUse(var, expr);
+}
+
+
+void LivenessAnalyzer::VisitLiteral(Literal* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitAssignment(Assignment* expr) {
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->IsPropertyName());
+  VariableProxy* proxy = prop->obj()->AsVariableProxy();
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+
+  // Record use of this at the assignment node. Assignments to
+  // this-properties are treated like unary operations.
+  RecordUse(proxy->var(), expr);
+
+  // Visit right-hand side.
+  Visit(expr->value());
+}
+
+
+void LivenessAnalyzer::VisitThrow(Throw* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitProperty(Property* expr) {
+  ASSERT(expr->key()->IsPropertyName());
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  RecordUse(proxy->var(), expr);
+}
+
+
+void LivenessAnalyzer::VisitCall(Call* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCallNew(CallNew* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCallRuntime(CallRuntime* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitCountOperation(CountOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
+  // Visit child nodes in reverse evaluation order.
+  Visit(expr->right());
+  Visit(expr->left());
+}
+
+
+void LivenessAnalyzer::VisitCompareOperation(CompareOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
+  UNREACHABLE();
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index ac83503..2331944 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -29,7 +29,7 @@
 #define V8_DATAFLOW_H_
 
 #include "ast.h"
-#include "scopes.h"
+#include "compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -38,13 +38,13 @@
 // their evaluation order (post-order left-to-right traversal).
 class AstLabeler: public AstVisitor {
  public:
-  AstLabeler() : next_number_(0), has_this_properties_(false) {}
+  AstLabeler() : next_number_(0) {}
 
-  void Label(FunctionLiteral* fun);
-
-  bool has_this_properties() { return has_this_properties_; }
+  void Label(CompilationInfo* info);
 
  private:
+  CompilationInfo* info() { return info_; }
+
   void VisitDeclarations(ZoneList<Declaration*>* decls);
   void VisitStatements(ZoneList<Statement*>* stmts);
 
@@ -56,12 +56,62 @@
   // Traversal number for labelling AST nodes.
   int next_number_;
 
-  bool has_this_properties_;
+  CompilationInfo* info_;
 
   DISALLOW_COPY_AND_ASSIGN(AstLabeler);
 };
 
 
+class VarUseMap : public HashMap {
+ public:
+  VarUseMap() : HashMap(VarMatch) {}
+
+  ZoneList<Expression*>* Lookup(Variable* var);
+
+ private:
+  static bool VarMatch(void* key1, void* key2) { return key1 == key2; }
+};
+
+
+class DefinitionInfo : public ZoneObject {
+ public:
+  DefinitionInfo() : last_use_(NULL) {}
+
+  Expression* last_use() { return last_use_; }
+  void set_last_use(Expression* expr) { last_use_ = expr; }
+
+ private:
+  Expression* last_use_;
+  Register location_;
+};
+
+
+class LivenessAnalyzer : public AstVisitor {
+ public:
+  LivenessAnalyzer() {}
+
+  void Analyze(FunctionLiteral* fun);
+
+ private:
+  void VisitStatements(ZoneList<Statement*>* stmts);
+
+  void RecordUse(Variable* var, Expression* expr);
+  void RecordDef(Variable* var, Expression* expr);
+
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Map for tracking the live variables.
+  VarUseMap live_vars_;
+
+  DISALLOW_COPY_AND_ASSIGN(LivenessAnalyzer);
+};
+
+
 } }  // namespace v8::internal
 
+
 #endif  // V8_DATAFLOW_H_
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 14d8c88..754ac5d 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1934,10 +1934,14 @@
   if (isNaN(modules)) {
     return response.failed('Modules is not an integer');
   }
+  var tag = parseInt(request.arguments.tag, 10);
+  if (isNaN(tag)) {
+    tag = 0;
+  }
   if (request.arguments.command == 'resume') {
-    %ProfilerResume(modules);
+    %ProfilerResume(modules, tag);
   } else if (request.arguments.command == 'pause') {
-    %ProfilerPause(modules);
+    %ProfilerPause(modules, tag);
   } else {
     return response.failed('Unknown command');
   }
diff --git a/src/debug.cc b/src/debug.cc
index fb9b23e..8c4f51d 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -31,6 +31,7 @@
 #include "arguments.h"
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
@@ -453,15 +454,7 @@
 
 
 bool BreakLocationIterator::IsDebuggerStatement() {
-  if (RelocInfo::IsCodeTarget(rmode())) {
-    Address target = original_rinfo()->target_address();
-    Code* code = Code::GetCodeFromTargetAddress(target);
-    if (code->kind() == Code::STUB) {
-      CodeStub::Major major_key = code->major_key();
-      return (major_key == CodeStub::DebuggerStatement);
-    }
-  }
-  return false;
+  return RelocInfo::DEBUG_BREAK == rmode();
 }
 
 
@@ -690,7 +683,8 @@
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
   Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
+                                  Handle<String>::null());
   FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 50f3eb9..8473cd9 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -261,7 +261,7 @@
             ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
             out.AddFormatted(" %s, %s, ",
                              Code::Kind2String(kind),
-                             CodeStub::MajorName(code->major_key()));
+                             CodeStub::MajorName(code->major_key(), false));
             switch (code->major_key()) {
               case CodeStub::CallFunction:
                 out.AddFormatted("argc = %d", minor_key);
diff --git a/src/execution.cc b/src/execution.cc
index a79af23..2068413 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -91,7 +91,7 @@
     JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    byte* entry_address= func->code()->entry();
+    byte* entry_address = func->code()->entry();
     JSFunction* function = *func;
     Object* receiver_pointer = *receiver;
     value = CALL_GENERATED_CODE(entry, entry_address, function,
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 4e6f259..ecd2652 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -51,8 +51,7 @@
   } while (false)
 
 
-void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
-                                     CompilationInfo* info) {
+void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
   info_ = info;
 
   // We do not specialize if we do not have a receiver or if it is not a
@@ -64,7 +63,7 @@
 
   // We do not support stack or heap slots (both of which require
   // allocation).
-  Scope* scope = fun->scope();
+  Scope* scope = info->scope();
   if (scope->num_stack_slots() > 0) {
     BAILOUT("Function has stack-allocated locals");
   }
@@ -76,8 +75,10 @@
   CHECK_BAILOUT;
 
   // We do not support empty function bodies.
-  if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
-  VisitStatements(fun->body());
+  if (info->function()->body()->is_empty()) {
+    BAILOUT("Function has an empty body");
+  }
+  VisitStatements(info->function()->body());
 }
 
 
@@ -88,10 +89,10 @@
 
 
 void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0, len = stmts->length(); i < len; i++) {
-    Visit(stmts->at(i));
-    CHECK_BAILOUT;
+  if (stmts->length() != 1) {
+    BAILOUT("Function body is not a singleton statement.");
   }
+  Visit(stmts->at(0));
 }
 
 
@@ -213,7 +214,24 @@
 void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
   // Only global variable references are supported.
   Variable* var = expr->var();
-  if (!var->is_global()) BAILOUT("Non-global variable");
+  if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
+
+  // Check if the global variable is existing and non-deletable.
+  if (info()->has_global_object()) {
+    LookupResult lookup;
+    info()->global_object()->Lookup(*expr->name(), &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Non-existing global variable");
+    }
+    // We do not handle global variables with accessors or interceptors.
+    if (lookup.type() != NORMAL) {
+      BAILOUT("Global variable with accessors or interceptors.");
+    }
+    // We do not handle deletable global variables.
+    if (!lookup.IsDontDelete()) {
+      BAILOUT("Deletable global variable");
+    }
+  }
 }
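The three bailouts above encode when a global load can be specialized to a direct property-cell access: the property must exist at compile time, be a plain (NORMAL) data property, and be non-deletable, otherwise the cell could change shape or disappear under the generated code. As a standalone predicate (field names hypothetical):

    struct LookupSketch {
      bool is_property;     // found at compile time
      bool is_normal;       // no accessors or interceptors
      bool is_dont_delete;  // cannot be deleted later
    };

    bool CanSpecializeGlobalLoad(const LookupSketch& lookup) {
      return lookup.is_property && lookup.is_normal && lookup.is_dont_delete;
    }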
 
 
@@ -266,6 +284,9 @@
     Handle<String> name = Handle<String>::cast(key->handle());
     LookupResult lookup;
     receiver->Lookup(*name, &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Assigned property not found at compile time");
+    }
     if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
     if (lookup.type() != FIELD) BAILOUT("Non-field property assignment");
   } else {
@@ -283,7 +304,33 @@
 
 
 void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
-  BAILOUT("Property");
+  // We support named this property references.
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  if (proxy == NULL || !proxy->var()->is_this()) {
+    BAILOUT("Non-this-property reference");
+  }
+  if (!expr->key()->IsPropertyName()) {
+    BAILOUT("Non-named-property reference");
+  }
+
+  // We will only specialize for fields on the object itself.
+  // Expression::IsPropertyName implies that the name is a literal
+  // symbol but we do not assume that.
+  Literal* key = expr->key()->AsLiteral();
+  if (key != NULL && key->handle()->IsString()) {
+    Handle<Object> receiver = info()->receiver();
+    Handle<String> name = Handle<String>::cast(key->handle());
+    LookupResult lookup;
+    receiver->Lookup(*name, &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Referenced property not found at compile time");
+    }
+    if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
+    if (lookup.type() != FIELD) BAILOUT("Non-field property reference");
+  } else {
+    UNREACHABLE();
+    BAILOUT("Unexpected non-string-literal property key");
+  }
 }
 
 
@@ -313,7 +360,58 @@
 
 
 void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
-  BAILOUT("BinaryOperation");
+  // We support bitwise OR.
+  switch (expr->op()) {
+    case Token::COMMA:
+      BAILOUT("BinaryOperation COMMA");
+    case Token::OR:
+      BAILOUT("BinaryOperation OR");
+    case Token::AND:
+      BAILOUT("BinaryOperation AND");
+
+    case Token::BIT_OR:
+      // We support expressions nested on the left because they only require
+      // a pair of registers to keep all intermediate values in registers
+      // (i.e., the expression stack has height no more than two).
+      if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
+
+      // We do not allow subexpressions with side effects because we
+      // (currently) bail out to the beginning of the full function.  The
+      // only expressions with side effects that we would otherwise handle
+      // are assignments.
+      if (expr->left()->AsAssignment() != NULL ||
+          expr->right()->AsAssignment() != NULL) {
+        BAILOUT("subexpression of binary operation has side effects");
+      }
+
+      Visit(expr->left());
+      CHECK_BAILOUT;
+      Visit(expr->right());
+      break;
+
+    case Token::BIT_XOR:
+      BAILOUT("BinaryOperation BIT_XOR");
+    case Token::BIT_AND:
+      BAILOUT("BinaryOperation BIT_AND");
+    case Token::SHL:
+      BAILOUT("BinaryOperation SHL");
+    case Token::SAR:
+      BAILOUT("BinaryOperation SAR");
+    case Token::SHR:
+      BAILOUT("BinaryOperation SHR");
+    case Token::ADD:
+      BAILOUT("BinaryOperation ADD");
+    case Token::SUB:
+      BAILOUT("BinaryOperation SUB");
+    case Token::MUL:
+      BAILOUT("BinaryOperation MUL");
+    case Token::DIV:
+      BAILOUT("BinaryOperation DIV");
+    case Token::MOD:
+      BAILOUT("BinaryOperation MOD");
+    default:
+      UNREACHABLE();
+  }
 }
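The left-nesting restriction is what keeps the expression stack at height two: for ((a | b) | c), one register carries the running left value while the other loads each leaf. A runnable trace of that register discipline:

    #include <cstdio>

    int main() {
      int a = 1, b = 2, c = 4;
      int acc1 = a;        // accumulator1: left subtree result
      int acc0 = b;        // accumulator0: current leaf
      acc1 = acc1 | acc0;  // (a | b)
      acc0 = c;            // next leaf
      acc1 = acc1 | acc0;  // (a | b) | c
      std::printf("%d\n", acc1);  // prints 7; never more than two live values
      return 0;
    }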
 
 
@@ -332,24 +430,23 @@
 
 #define __ ACCESS_MASM(masm())
 
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval,
-                                         CompilationInfo* info) {
+Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
   // Label the AST before calling MakeCodePrologue, so AST node numbers are
   // printed with the AST.
   AstLabeler labeler;
-  labeler.Label(fun);
-  info->set_has_this_properties(labeler.has_this_properties());
+  labeler.Label(info);
 
-  CodeGenerator::MakeCodePrologue(fun);
+  LivenessAnalyzer analyzer;
+  analyzer.Analyze(info->function());
+
+  CodeGenerator::MakeCodePrologue(info);
 
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
 
   // Generate the fast-path code.
-  FastCodeGenerator fast_cgen(&masm, script, is_eval);
-  fast_cgen.Generate(fun, info);
+  FastCodeGenerator fast_cgen(&masm);
+  fast_cgen.Generate(info);
   if (fast_cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -357,16 +454,16 @@
 
   // Generate the full code for the function in bailout mode, using the same
   // macro assembler.
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+  cgen.Generate(info, CodeGenerator::SECONDARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
   }
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
 }
 
 
@@ -483,12 +580,28 @@
 
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   ASSERT(expr->var()->is_global() && !expr->var()->is_this());
-  Comment cmnt(masm(), ";; Global");
-  if (FLAG_print_ir) {
-    SmartPointer<char> name = expr->name()->ToCString();
-    PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
+  // Check if we can compile a global variable load directly from the cell.
+  ASSERT(info()->has_global_object());
+  LookupResult lookup;
+  info()->global_object()->Lookup(*expr->name(), &lookup);
+  // We only support normal (non-accessor/interceptor) DontDelete properties
+  // for now.
+  ASSERT(lookup.IsProperty());
+  ASSERT_EQ(NORMAL, lookup.type());
+  ASSERT(lookup.IsDontDelete());
+  Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
+
+  // Global variable lookups do not have side effects, so we do not need to
+  // emit code if we are in an effect context.
+  if (!destination().is(no_reg)) {
+    Comment cmnt(masm(), ";; Global");
+    if (FLAG_print_ir) {
+      SmartPointer<char> name = expr->name()->ToCString();
+      PrintF("%d: t%d = Global(%s)  // last_use = %d\n", expr->num(),
+             expr->num(), *name, expr->var_def()->last_use()->num());
+    }
+    EmitGlobalVariableLoad(cell);
   }
-  EmitGlobalVariableLoad(expr->name());
 }
 
 
@@ -518,8 +631,13 @@
 
 
 void FastCodeGenerator::VisitAssignment(Assignment* expr) {
-  // Known to be a simple this property assignment.
-  Visit(expr->value());
+  // Known to be a simple this property assignment.  Effectively a unary
+  // operation.
+  { Register my_destination = destination();
+    set_destination(accumulator0());
+    Visit(expr->value());
+    set_destination(my_destination);
+  }
 
   Property* prop = expr->target()->AsProperty();
   ASSERT_NOT_NULL(prop);
@@ -529,11 +647,14 @@
   Handle<String> name =
       Handle<String>::cast(prop->key()->AsLiteral()->handle());
 
-  Comment cmnt(masm(), ";; Store(this)");
+  Comment cmnt(masm(), ";; Store to this");
   if (FLAG_print_ir) {
     SmartPointer<char> name_string = name->ToCString();
-    PrintF("%d: t%d = Store(this, \"%s\", t%d)\n",
-           expr->num(), expr->num(), *name_string, expr->value()->num());
+    PrintF("%d: ", expr->num());
+    if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
+    PrintF("Store(this, \"%s\", t%d)  // last_use(this) = %d\n", *name_string,
+           expr->value()->num(),
+           expr->var_def()->last_use()->num());
   }
 
   EmitThisPropertyStore(name);
@@ -546,7 +667,22 @@
 
 
 void FastCodeGenerator::VisitProperty(Property* expr) {
-  UNREACHABLE();
+  ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
+  ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
+  ASSERT(expr->key()->IsPropertyName());
+  if (!destination().is(no_reg)) {
+    Handle<String> name =
+        Handle<String>::cast(expr->key()->AsLiteral()->handle());
+
+    Comment cmnt(masm(), ";; Load from this");
+    if (FLAG_print_ir) {
+      SmartPointer<char> name_string = name->ToCString();
+      PrintF("%d: t%d = Load(this, \"%s\")  // last_use(this) = %d\n",
+             expr->num(), expr->num(), *name_string,
+             expr->var_def()->last_use()->num());
+    }
+    EmitThisPropertyLoad(name);
+  }
 }
 
 
@@ -576,7 +712,26 @@
 
 
 void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  UNREACHABLE();
+  // We support limited binary operations: only bitwise OR, and it may be
+  // nested only on the left.
+  ASSERT(expr->op() == Token::BIT_OR);
+  ASSERT(expr->right()->IsLeaf());
+
+  { Register my_destination = destination();
+    set_destination(accumulator1());
+    Visit(expr->left());
+    set_destination(accumulator0());
+    Visit(expr->right());
+    set_destination(my_destination);
+  }
+
+  Comment cmnt(masm(), ";; BIT_OR");
+  if (FLAG_print_ir) {
+    PrintF("%d: ", expr->num());
+    if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
+    PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
+  }
+  EmitBitOr();
 }
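Both VisitAssignment and VisitBinaryOperation use the same save/redirect/restore dance on the destination register. A hypothetical RAII helper capturing the pattern (Reg stands in for the real Register type):

    template <class Reg>
    class ScopedDestination {
     public:
      ScopedDestination(Reg* slot, Reg temp) : slot_(slot), saved_(*slot) {
        *slot_ = temp;  // redirect subexpression results
      }
      ~ScopedDestination() { *slot_ = saved_; }  // restore on scope exit
     private:
      Reg* slot_;
      Reg saved_;
    };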
 
 
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index b40f6fb..96ee5dd 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -42,7 +42,7 @@
       : info_(NULL), has_supported_syntax_(true) {
   }
 
-  void Check(FunctionLiteral* fun, CompilationInfo* info);
+  void Check(CompilationInfo* info);
 
   CompilationInfo* info() { return info_; }
   bool has_supported_syntax() { return has_supported_syntax_; }
@@ -65,62 +65,86 @@
 
 class FastCodeGenerator: public AstVisitor {
  public:
-  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
-      : masm_(masm),
-        script_(script),
-        is_eval_(is_eval),
-        function_(NULL),
-        info_(NULL) {
+  explicit FastCodeGenerator(MacroAssembler* masm)
+      : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
   }
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
-  void Generate(FunctionLiteral* fun, CompilationInfo* info);
+  void Generate(CompilationInfo* compilation_info);
 
  private:
   MacroAssembler* masm() { return masm_; }
-  FunctionLiteral* function() { return function_; }
+  CompilationInfo* info() { return info_; }
   Label* bailout() { return &bailout_; }
 
-  bool has_receiver() { return !info_->receiver().is_null(); }
-  Handle<Object> receiver() { return info_->receiver(); }
-  bool has_this_properties() { return info_->has_this_properties(); }
+  Register destination() { return destination_; }
+  void set_destination(Register reg) { destination_ = reg; }
+
+  FunctionLiteral* function() { return info_->function(); }
+  Scope* scope() { return info_->scope(); }
+
+  // Platform-specific fixed registers, all guaranteed distinct.
+  Register accumulator0();
+  Register accumulator1();
+  Register scratch0();
+  Register scratch1();
+  Register receiver_reg();
+  Register context_reg();
+
+  Register other_accumulator(Register reg) {
+    ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
+    return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
+  }
+
+  // Flags are true if the respective register is statically known to hold a
+  // smi.  We do not track every register, only the accumulator registers.
+  bool is_smi(Register reg) {
+    ASSERT(!reg.is(no_reg));
+    return (smi_bits_ & reg.bit()) != 0;
+  }
+  void set_as_smi(Register reg) {
+    ASSERT(!reg.is(no_reg));
+    smi_bits_ = smi_bits_ | reg.bit();
+  }
+  void clear_as_smi(Register reg) {
+    ASSERT(!reg.is(no_reg));
+    smi_bits_ = smi_bits_ & ~reg.bit();
+  }
 
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
-  // Emit code to load the receiver from the stack into a given register.
-  void EmitLoadReceiver(Register reg);
+  // Emit code to load the receiver from the stack into receiver_reg.
+  void EmitLoadReceiver();
 
-  // Emit code to check that the receiver has the same map as the
-  // compile-time receiver.  Receiver is expected in {ia32-edx, x64-rdx,
-  // arm-r1}.  Emit a branch to the (single) bailout label if check fails.
-  void EmitReceiverMapCheck();
-
-  // Emit code to load a global variable value into {is32-eax, x64-rax,
-  // arm-r0}.  Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
-  // holding the receiver and {is32-ecx, x64-rcx, arm-r2} is always
-  // clobbered.
-  void EmitGlobalVariableLoad(Handle<String> name);
+  // Emit code to load a global variable directly from a global property
+  // cell into the destination register.
+  void EmitGlobalVariableLoad(Handle<Object> cell);
 
   // Emit a store to an own property of this.  The stored value is expected
-  // in {ia32-eax, x64-rax, arm-r0} and the receiver in {is32-edx, x64-rdx,
-  // arm-r1}.  Both are preserve.
+  // in accumulator0 and the receiver in receiver_reg.  The receiver
+  // register is preserved and the result (the stored value) is left in the
+  // destination register.
   void EmitThisPropertyStore(Handle<String> name);
 
+  // Emit a load from an own property of this.  The receiver is expected in
+  // receiver_reg.  The receiver register is preserved and the result is
+  // left in the destination register.
+  void EmitThisPropertyLoad(Handle<String> name);
+
+  // Emit a bitwise or operation.  The left operand is in accumulator1 and
+  // the right is in accumulator0.  The result should be left in the
+  // destination register.
+  void EmitBitOr();
+
   MacroAssembler* masm_;
-  Handle<Script> script_;
-  bool is_eval_;
-
-  FunctionLiteral* function_;
   CompilationInfo* info_;
-
   Label bailout_;
+  Register destination_;
+  uint32_t smi_bits_;
 
   DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
 };
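The smi_bits_ word tracks, per accumulator, whether the register is statically known to hold a smi; each register contributes one bit (reg.bit() in the real code). The operations reduce to plain mask arithmetic, as in this sketch (bit positions illustrative):

    #include <stdint.h>

    const uint32_t kAcc0Bit = 1u << 0;
    const uint32_t kAcc1Bit = 1u << 1;

    bool IsSmi(uint32_t smi_bits, uint32_t reg_bit) {
      return (smi_bits & reg_bit) != 0;
    }
    uint32_t SetAsSmi(uint32_t smi_bits, uint32_t reg_bit) {
      return smi_bits | reg_bit;
    }
    uint32_t ClearAsSmi(uint32_t smi_bits, uint32_t reg_bit) {
      return smi_bits & ~reg_bit;
    }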
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index b57f2cb..6e22d5b 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -116,6 +116,8 @@
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, true,
             "enable use of VFP3 instructions if available (ARM only)")
+DEFINE_bool(enable_armv7, true,
+            "enable use of ARMv7 instructions if available (ARM only)")
 
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -218,7 +220,7 @@
 // rewriter.cc
 DEFINE_bool(optimize_ast, true, "optimize the ast")
 
-// simulator-arm.cc
+// simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "trace simulator execution")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 
diff --git a/src/frame-element.cc b/src/frame-element.cc
index e6bc2ea..1455559 100644
--- a/src/frame-element.cc
+++ b/src/frame-element.cc
@@ -32,10 +32,6 @@
 namespace v8 {
 namespace internal {
 
-// -------------------------------------------------------------------------
-// FrameElement implementation.
-
-
 FrameElement::ZoneObjectList* FrameElement::ConstantList() {
   static ZoneObjectList list(10);
   return &list;
diff --git a/src/frame-element.h b/src/frame-element.h
index ccdecf1..3ae6d30 100644
--- a/src/frame-element.h
+++ b/src/frame-element.h
@@ -28,7 +28,8 @@
 #ifndef V8_FRAME_ELEMENT_H_
 #define V8_FRAME_ELEMENT_H_
 
-#include "register-allocator-inl.h"
+#include "number-info.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -52,11 +53,28 @@
     SYNCED
   };
 
+  inline NumberInfo::Type number_info() {
+    // Copied elements do not have number info. Instead
+    // we have to inspect their backing element in the frame.
+    ASSERT(!is_copy());
+    if (!is_constant()) return NumberInfoField::decode(value_);
+    Handle<Object> value = handle();
+    if (value->IsSmi()) return NumberInfo::kSmi;
+    if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
+    return NumberInfo::kUnknown;
+  }
+
+  inline void set_number_info(NumberInfo::Type info) {
+    value_ = value_ & ~NumberInfoField::mask();
+    value_ = value_ | NumberInfoField::encode(info);
+  }
+
   // The default constructor creates an invalid frame element.
   FrameElement() {
     value_ = TypeField::encode(INVALID)
         | CopiedField::encode(false)
         | SyncedField::encode(false)
+        | NumberInfoField::encode(NumberInfo::kUninitialized)
         | DataField::encode(0);
   }
 
@@ -67,15 +85,16 @@
   }
 
   // Factory function to construct an in-memory frame element.
-  static FrameElement MemoryElement() {
-    FrameElement result(MEMORY, no_reg, SYNCED);
+  static FrameElement MemoryElement(NumberInfo::Type info) {
+    FrameElement result(MEMORY, no_reg, SYNCED, info);
     return result;
   }
 
   // Factory function to construct an in-register frame element.
   static FrameElement RegisterElement(Register reg,
-                                      SyncFlag is_synced) {
-    return FrameElement(REGISTER, reg, is_synced);
+                                      SyncFlag is_synced,
+                                      NumberInfo::Type info) {
+    return FrameElement(REGISTER, reg, is_synced, info);
   }
 
   // Factory function to construct a frame element whose value is known at
@@ -185,10 +204,14 @@
   };
 
   // Used to construct memory and register elements.
-  FrameElement(Type type, Register reg, SyncFlag is_synced) {
+  FrameElement(Type type,
+               Register reg,
+               SyncFlag is_synced,
+               NumberInfo::Type info) {
     value_ = TypeField::encode(type)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
+        | NumberInfoField::encode(info)
         | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
   }
 
@@ -197,6 +220,7 @@
     value_ = TypeField::encode(CONSTANT)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
+        | NumberInfoField::encode(NumberInfo::kUninitialized)
         | DataField::encode(ConstantList()->length());
     ConstantList()->Add(value);
   }
@@ -223,9 +247,10 @@
   uint32_t value_;
 
   class TypeField: public BitField<Type, 0, 3> {};
-  class CopiedField: public BitField<uint32_t, 3, 1> {};
-  class SyncedField: public BitField<uint32_t, 4, 1> {};
-  class DataField: public BitField<uint32_t, 5, 32 - 6> {};
+  class CopiedField: public BitField<bool, 3, 1> {};
+  class SyncedField: public BitField<bool, 4, 1> {};
+  class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
+  class DataField: public BitField<uint32_t, 8, 32 - 9> {};
 
   friend class VirtualFrame;
 };
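The repacking above squeezes a 3-bit NumberInfo field in at bit 5, shrinking DataField accordingly. A runnable miniature of the BitField encode/decode/update cycle that number_info() and set_number_info() rely on:

    #include <assert.h>
    #include <stdint.h>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & mask()) >> shift);
      }
    };

    typedef BitField<uint32_t, 5, 3> NumberInfoField;  // 3 bits at bit 5

    int main() {
      uint32_t word = NumberInfoField::encode(4);  // initial encode
      word = (word & ~NumberInfoField::mask())     // set_number_info:
           | NumberInfoField::encode(2);           // clear, then re-encode
      assert(NumberInfoField::decode(word) == 2);
      return 0;
    }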
diff --git a/src/frames-inl.h b/src/frames-inl.h
index c5f2f1a..7221851 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -36,6 +36,8 @@
 #include "x64/frames-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/frames-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/frames.cc b/src/frames.cc
index e56a2c8..5d88265 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -408,12 +408,7 @@
 
 
 Code* ExitFrame::code() const {
-  Object* code = code_slot();
-  if (code->IsSmi()) {
-    return Heap::debugger_statement_code();
-  } else {
-    return Code::cast(code);
-  }
+  return Code::cast(code_slot());
 }
 
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 01714cb..6371439 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -32,6 +32,7 @@
 #include "full-codegen.h"
 #include "stub-cache.h"
 #include "debug.h"
+#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
@@ -439,24 +440,27 @@
 
 #define __ ACCESS_MASM(masm())
 
-Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval) {
+Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
+  Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_full_codegen_source_size.Increment(len);
   }
-  CodeGenerator::MakeCodePrologue(fun);
+  CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  FullCodeGenerator cgen(&masm, script, is_eval);
-  cgen.Generate(fun, PRIMARY);
+  LiveEditFunctionTracker live_edit_tracker(info->function());
+
+  FullCodeGenerator cgen(&masm);
+  cgen.Generate(info, PRIMARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
   }
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  Handle<Code> result = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
+  live_edit_tracker.RecordFunctionCode(result);
+  return result;
 }
 
 
@@ -467,7 +471,7 @@
   // Adjust by a (parameter or local) base offset.
   switch (slot->type()) {
     case Slot::PARAMETER:
-      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+      offset += (scope()->num_parameters() + 1) * kPointerSize;
       break;
     case Slot::LOCAL:
       offset += JavaScriptFrameConstants::kLocal0Offset;
@@ -520,7 +524,7 @@
           }
         } else {
           Handle<JSFunction> function =
-              Compiler::BuildBoilerplate(decl->fun(), script_, this);
+              Compiler::BuildBoilerplate(decl->fun(), script(), this);
           // Check for stack-overflow exception.
           if (HasStackOverflow()) return;
           array->set(j++, *function);
@@ -987,8 +991,7 @@
   Comment cmnt(masm_, "[ DebuggerStatement");
   SetStatementPosition(stmt);
 
-  DebuggerStatementStub ces;
-  __ CallStub(&ces);
+  __ DebugBreak();
   // Ignore the return value.
 #endif
 }
@@ -1033,86 +1036,6 @@
 }
 
 
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() != Token::INIT_CONST);
-  // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
-  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
-  LhsKind assign_type = VARIABLE;
-  Property* prop = expr->target()->AsProperty();
-  if (prop != NULL) {
-    assign_type =
-        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
-  }
-
-  // Evaluate LHS expression.
-  switch (assign_type) {
-    case VARIABLE:
-      // Nothing to do here.
-      break;
-    case NAMED_PROPERTY:
-      VisitForValue(prop->obj(), kStack);
-      break;
-    case KEYED_PROPERTY:
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-      break;
-  }
-
-  // If we have a compound assignment: Get value of LHS expression and
-  // store in on top of the stack.
-  if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kStack;
-    switch (assign_type) {
-      case VARIABLE:
-        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
-                         Expression::kValue);
-        break;
-      case NAMED_PROPERTY:
-        EmitNamedPropertyLoad(prop);
-        __ push(result_register());
-        break;
-      case KEYED_PROPERTY:
-        EmitKeyedPropertyLoad(prop);
-        __ push(result_register());
-        break;
-    }
-    location_ = saved_location;
-  }
-
-  // Evaluate RHS expression.
-  Expression* rhs = expr->value();
-  VisitForValue(rhs, kAccumulator);
-
-  // If we have a compound assignment: Apply operator.
-  if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    EmitBinaryOp(expr->binary_op(), Expression::kValue);
-    location_ = saved_location;
-  }
-
-  // Record source position before possible IC call.
-  SetSourcePosition(expr->position());
-
-  // Store the value.
-  switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             context_);
-      break;
-    case NAMED_PROPERTY:
-      EmitNamedPropertyAssignment(expr);
-      break;
-    case KEYED_PROPERTY:
-      EmitKeyedPropertyAssignment(expr);
-      break;
-  }
-}
-
-
 void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 6688ff7..96d0f3e 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -68,11 +68,9 @@
     SECONDARY
   };
 
-  FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+  explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
-        script_(script),
-        is_eval_(is_eval),
-        function_(NULL),
+        info_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
         location_(kStack),
@@ -80,11 +78,9 @@
         false_label_(NULL) {
   }
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
-  void Generate(FunctionLiteral* fun, Mode mode);
+  void Generate(CompilationInfo* info, Mode mode);
 
  private:
   class Breakable;
@@ -408,6 +404,12 @@
   }
 
   MacroAssembler* masm() { return masm_; }
+
+  Handle<Script> script() { return info_->script(); }
+  bool is_eval() { return info_->is_eval(); }
+  FunctionLiteral* function() { return info_->function(); }
+  Scope* scope() { return info_->scope(); }
+
   static Register result_register();
   static Register context_register();
 
@@ -427,10 +429,7 @@
   void EmitLogicalOperation(BinaryOperation* expr);
 
   MacroAssembler* masm_;
-  Handle<Script> script_;
-  bool is_eval_;
-
-  FunctionLiteral* function_;
+  CompilationInfo* info_;
 
   Label return_label_;
   NestedStatement* nesting_stack_;
diff --git a/src/globals.h b/src/globals.h
index 39f6bcb..8f6f47c 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -46,6 +46,9 @@
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
 #else
 #error Your host architecture was not detected as supported by v8
 #endif
@@ -53,6 +56,7 @@
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
 #define V8_TARGET_CAN_READ_UNALIGNED 1
 #elif V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_MIPS
 #else
 #error Your target architecture is not supported by v8
 #endif
@@ -608,6 +612,7 @@
                   RDTSC = 4,   // x86
                   CPUID = 10,  // x86
                   VFP3 = 1,    // ARM
+                  ARMv7 = 2,   // ARM
                   SAHF = 0};   // x86
 
 } }  // namespace v8::internal
diff --git a/src/handles.cc b/src/handles.cc
index c66056e..971c916 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -300,6 +300,12 @@
 }
 
 
+Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
+  const bool skip_hidden_prototypes = false;
+  CALL_HEAP_FUNCTION(obj->SetPrototype(*value, skip_hidden_prototypes), Object);
+}
+
+
 Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
                                    bool create_if_needed) {
   Object* holder = obj->BypassGlobalProxy();
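
The new handles.cc wrapper routes the raw SetPrototype call through CALL_HEAP_FUNCTION, which (roughly) runs an allocating call, retries it after a garbage collection if allocation failed, and returns the result wrapped in a GC-safe Handle. A loose, self-contained sketch of that retry-and-wrap idea, with std::optional standing in for V8's Failure sentinels:

    #include <new>
    #include <optional>

    // Sketch only: V8's macro operates on raw Object* / Failure values.
    template <typename T, typename AllocFn, typename GcFn>
    T RetryAfterGC(AllocFn alloc, GcFn collect_garbage) {
      if (std::optional<T> result = alloc()) return *result;  // Fast path.
      collect_garbage();               // Reclaim space, then try once more.
      if (std::optional<T> result = alloc()) return *result;
      throw std::bad_alloc();          // Genuinely out of memory.
    }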
@@ -477,25 +483,25 @@
 int GetScriptLineNumber(Handle<Script> script, int code_pos) {
   InitScriptLineEnds(script);
   AssertNoAllocation no_allocation;
-  FixedArray* line_ends_array =
-      FixedArray::cast(script->line_ends());
+  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
   const int line_ends_len = line_ends_array->length();
 
-  int line = -1;
-  if (line_ends_len > 0 &&
-      code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
-    line = 0;
-  } else {
-    for (int i = 1; i < line_ends_len; ++i) {
-      if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos &&
-          code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) {
-        line = i;
-        break;
-      }
+  if (!line_ends_len)
+    return -1;
+
+  if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos)
+    return script->line_offset()->value();
+
+  int left = 0;
+  int right = line_ends_len;
+  while (int half = (right - left) / 2) {
+    if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
+      right -= half;
+    } else {
+      left += half;
     }
   }
-
-  return line != -1 ? line + script->line_offset()->value() : line;
+  return right + script->line_offset()->value();
 }
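
The rewrite above replaces GetScriptLineNumber's linear scan with a binary search over the sorted line_ends array: after the early out for line 0, the loop halves [left, right) until one candidate remains and returns the first line whose recorded end offset is strictly greater than code_pos. The same loop over a plain int array, names illustrative:

    #include <cassert>

    // line_ends[i] is the (ascending) end offset of line i. Positions at or
    // before line_ends[0] map to line 0; otherwise returns the first index
    // whose end offset is strictly greater than pos.
    int LineForPosition(const int* line_ends, int len, int pos) {
      assert(len > 0);
      if (line_ends[0] >= pos) return 0;
      int left = 0;
      int right = len;
      while (int half = (right - left) / 2) {  // Stops when right == left + 1.
        if (line_ends[left + half] > pos) {
          right -= half;
        } else {
          left += half;
        }
      }
      return right;
    }

This drops each lookup from O(lines) to O(log lines).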
 
 
@@ -686,7 +692,7 @@
 
 bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
                        ClearExceptionFlag flag) {
-  CompilationInfo info(shared, Handle<Object>::null(), 0);
+  CompilationInfo info(shared);
   return CompileLazyHelper(&info, flag);
 }
 
@@ -694,8 +700,7 @@
 bool CompileLazy(Handle<JSFunction> function,
                  Handle<Object> receiver,
                  ClearExceptionFlag flag) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  CompilationInfo info(shared, receiver, 0);
+  CompilationInfo info(function, 0, receiver);
   bool result = CompileLazyHelper(&info, flag);
   LOG(FunctionCreateEvent(*function));
   return result;
@@ -705,8 +710,7 @@
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  CompilationInfo info(shared, receiver, 1);
+  CompilationInfo info(function, 1, receiver);
   bool result = CompileLazyHelper(&info, flag);
   LOG(FunctionCreateEvent(*function));
   return result;
@@ -766,7 +770,8 @@
     Handle<String> script_name = Factory::NewStringFromAscii(name);
     bool allow_natives_syntax = FLAG_allow_natives_syntax;
     FLAG_allow_natives_syntax = true;
-    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
+                                    Handle<String>::null());
     FLAG_allow_natives_syntax = allow_natives_syntax;
     // If the compilation failed (possibly due to stack overflows), we
     // should never enter the result in the natives cache. Instead we
diff --git a/src/handles.h b/src/handles.h
index 04f087b..90e51fa 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -240,6 +240,8 @@
 
 Handle<Object> GetPrototype(Handle<Object> obj);
 
+Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
+
 // Return the object's hidden properties object. If the object has no hidden
 // properties and create_if_needed is true, then a new hidden property object
 // will be allocated. Otherwise the Heap::undefined_value is returned.
diff --git a/src/heap.cc b/src/heap.cc
index 5f4d815..fc4e666 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1498,12 +1498,6 @@
 #endif
 
 
-void Heap::CreateCEntryDebugBreakStub() {
-  DebuggerStatementStub stub;
-  set_debugger_statement_code(*stub.GetCode());
-}
-
-
 void Heap::CreateJSEntryStub() {
   JSEntryStub stub;
   set_js_entry_code(*stub.GetCode());
@@ -1531,7 +1525,6 @@
   // }
   // To work around the problem, make separate functions without inlining.
   Heap::CreateCEntryStub();
-  Heap::CreateCEntryDebugBreakStub();
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@@ -1774,6 +1767,7 @@
 
 
 Object* Heap::NumberToString(Object* number) {
+  Counters::number_to_string_runtime.Increment();
   Object* cached = GetNumberStringCache(number);
   if (cached != undefined_value()) {
     return cached;
@@ -2389,12 +2383,13 @@
   map->set_unused_property_fields(in_object_properties);
   map->set_prototype(prototype);
 
-  // If the function has only simple this property assignments add field
-  // descriptors for these to the initial map as the object cannot be
-  // constructed without having these properties.
+  // If the function has only simple this property assignments add
+  // field descriptors for these to the initial map as the object
+  // cannot be constructed without having these properties.  Guard by
+  // the inline_new flag so we only change the map if we generate a
+  // specialized construct stub.
   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
-  if (fun->shared()->has_only_simple_this_property_assignments() &&
-      fun->shared()->this_property_assignments_count() > 0) {
+  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
       count = in_object_properties;
diff --git a/src/heap.h b/src/heap.h
index cbf0b73..22ab875 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -101,7 +101,6 @@
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   V(Code, c_entry_code, CEntryCode)                                            \
-  V(Code, debugger_statement_code, DebuggerStatementCode)                      \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
@@ -1046,7 +1045,6 @@
   // These four Create*EntryStub functions are here because of a gcc-4.4 bug
   // that assigns wrong vtable entries.
   static void CreateCEntryStub();
-  static void CreateCEntryDebugBreakStub();
   static void CreateJSEntryStub();
   static void CreateJSConstructEntryStub();
   static void CreateRegExpCEntryStub();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index dc017ae..ffcefe0 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -267,7 +267,7 @@
 }
 
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
 // Emit a single byte. Must always be inlined.
 #define EMIT(x)                                 \
@@ -278,12 +278,12 @@
 static void InitCoverageLog();
 #endif
 
-// spare_buffer_
+// Spare buffer.
 byte* Assembler::spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -300,7 +300,7 @@
     buffer_size_ = buffer_size;
     own_buffer_ = true;
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
@@ -316,7 +316,7 @@
   }
 #endif
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -344,11 +344,10 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // finalize code
-  // (at this point overflow() may be true, but the gap ensures that
-  // we are still not overlapping instructions and relocation info)
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
-  // setup desc
+  // Finalize code (at this point overflow() may be true, but the gap ensures
+  // that we are still not overlapping instructions and relocation info).
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -435,7 +434,7 @@
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
-    // (last_pc_ != NULL) is rolled into the above check
+    // (last_pc_ != NULL) is rolled into the above check.
     // If a last_pc_ is set, we need to make sure that there has not been any
     // relocation information generated between the last instruction and this
     // pop instruction.
@@ -461,7 +460,7 @@
       return;
     } else if (instr == 0xff) {  // push of an operand, convert to a move
       byte op1 = last_pc_[1];
-      // Check if the operation is really a push
+      // Check if the operation is really a push.
       if ((op1 & 0x38) == (6 << 3)) {
         op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
         last_pc_[0] = 0x8b;
@@ -747,7 +746,7 @@
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   EMIT(0x0F);
   EMIT(0x40 + cc);
   emit_operand(dst, src);
@@ -765,7 +764,7 @@
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding
+  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding.
     EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
   } else {
     EMIT(0x87);
@@ -1434,7 +1433,7 @@
       if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
         ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
       }
-      // relative address, relative to point after address
+      // Relative address, relative to point after address.
       int imm32 = pos - (fixup_pos + sizeof(int32_t));
       long_at_put(fixup_pos, imm32);
     }
@@ -1449,7 +1448,7 @@
   last_pc_ = NULL;
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       Label p;
       Label q = *L;
       do {
@@ -1462,7 +1461,7 @@
       disp_at_put(&p, disp);
       p.Unuse();  // to avoid assertion failure in ~Label
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -1485,11 +1484,11 @@
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
-    // 1110 1000 #32-bit disp
+    // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit(offs - long_size);
   } else {
-    // 1110 1000 #32-bit disp
+    // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit_disp(L, Displacement::OTHER);
   }
@@ -1532,16 +1531,16 @@
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
     if (is_int8(offs - short_size)) {
-      // 1110 1011 #8-bit disp
+      // 1110 1011 #8-bit disp.
       EMIT(0xEB);
       EMIT((offs - short_size) & 0xFF);
     } else {
-      // 1110 1001 #32-bit disp
+      // 1110 1001 #32-bit disp.
       EMIT(0xE9);
       emit(offs - long_size);
     }
   } else {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     EMIT(0xE9);
     emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
   }
@@ -1611,7 +1610,7 @@
   last_pc_ = pc_;
   ASSERT((0 <= cc) && (cc < 16));
   if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
-  // 0000 1111 1000 tttn #32-bit disp
+  // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1629,7 +1628,7 @@
 }
 
 
-// FPU instructions
+// FPU instructions.
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
@@ -2225,10 +2224,10 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(overflow());  // should not call this otherwise
+  ASSERT(overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -2242,7 +2241,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2253,14 +2252,14 @@
   memset(desc.buffer, 0xCC, desc.buffer_size);
 #endif
 
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(rc_delta + reloc_info_writer.pos(),
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
     spare_buffer_ = buffer_;
   } else {
@@ -2275,7 +2274,7 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // relocate runtime entries
+  // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
     if (rmode == RelocInfo::RUNTIME_ENTRY) {
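
GrowBuffer above maintains the assembler's two-ended buffer layout: instructions fill the buffer forward from the start while relocation info fills it backward from the end, so growing means copying each region to its matching end of the larger buffer and shifting the reloc writer by the resulting deltas. A minimal sketch of that copy (illustrative names; simple doubling instead of V8's staged growth):

    #include <cstdlib>
    #include <cstring>

    struct CodeBuffer {
      unsigned char* data;
      int size;
      int instr_size;  // Bytes used at the front (instructions).
      int reloc_size;  // Bytes used at the back (relocation info).
    };

    void Grow(CodeBuffer* buf) {
      int new_size = buf->size * 2;
      unsigned char* fresh =
          static_cast<unsigned char*>(std::malloc(new_size));
      if (fresh == NULL) std::abort();  // Out of memory.
      // Instructions keep their offsets from the start of the buffer...
      std::memcpy(fresh, buf->data, buf->instr_size);
      // ...while reloc info keeps its offsets from the end of the buffer.
      std::memcpy(fresh + new_size - buf->reloc_size,
                  buf->data + buf->size - buf->reloc_size,
                  buf->reloc_size);
      std::free(buf->data);
      buf->data = fresh;
      buf->size = new_size;
    }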
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 9ce0734..3d7af82 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -77,7 +77,7 @@
     return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
 
@@ -231,7 +231,8 @@
   times_8 = 3,
   times_int_size = times_4,
   times_half_pointer_size = times_2,
-  times_pointer_size = times_4
+  times_pointer_size = times_4,
+  times_twice_pointer_size = times_8
 };
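
The new times_twice_pointer_size alias lands on times_8 because ia32 pointers are four bytes and ScaleFactor values are SIB-byte shift counts rather than multipliers: a memory operand computes base + (index << scale) + disp. Spelled out:

    #include <cstdint>

    // The SIB scale is a 2-bit shift count; times_8 is the encoding 3.
    enum ScaleFactor { times_1 = 0, times_2 = 1, times_4 = 2, times_8 = 3 };

    uintptr_t EffectiveAddress(uintptr_t base, uintptr_t index,
                               ScaleFactor scale, int32_t disp) {
      return base + (index << scale) + disp;
    }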
 
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2c5b1d1..54ef382 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -93,7 +93,10 @@
   // edi: called object
   // eax: number of arguments
   __ bind(&non_function_call);
-
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site).  The receiver is
+  // stack element argc+1.
+  __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
   // Set expected number of arguments to zero (not changing eax).
   __ Set(ebx, Immediate(0));
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -437,33 +440,26 @@
     __ bind(&done);
   }
 
-  // 2. Get the function to call from the stack.
-  { Label done, non_function, function;
-    // +1 ~ return address.
-    __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
-    __ test(edi, Immediate(kSmiTagMask));
-    __ j(zero, &non_function, not_taken);
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(equal, &function, taken);
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  Label non_function;
+  // 1 ~ return address.
+  __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &non_function, not_taken);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &non_function, not_taken);
 
-    // Non-function called: Clear the function to force exception.
-    __ bind(&non_function);
-    __ xor_(edi, Operand(edi));
-    __ jmp(&done);
 
-    // Function called: Change context eagerly to get the right global object.
-    __ bind(&function);
+  // 3a. Patch the first argument if necessary when calling a function.
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
     __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-    __ bind(&done);
-  }
-
-  // 3. Make sure first argument is an object; convert if necessary.
-  { Label call_to_object, use_global_receiver, patch_receiver, done;
-    __ mov(ebx, Operand(esp, eax, times_4, 0));
-
+    __ mov(ebx, Operand(esp, eax, times_4, 0));  // First argument.
     __ test(ebx, Immediate(kSmiTagMask));
-    __ j(zero, &call_to_object);
+    __ j(zero, &convert_to_object);
 
     __ cmp(ebx, Factory::null_value());
     __ j(equal, &use_global_receiver);
@@ -473,31 +469,28 @@
     __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
     __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
     __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-    __ j(less, &call_to_object);
+    __ j(below, &convert_to_object);
     __ cmp(ecx, LAST_JS_OBJECT_TYPE);
-    __ j(less_equal, &done);
+    __ j(below_equal, &shift_arguments);
 
-    __ bind(&call_to_object);
-    __ EnterInternalFrame();  // preserves eax, ebx, edi
-
-    // Store the arguments count on the stack (smi tagged).
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
     __ SmiTag(eax);
     __ push(eax);
 
-    __ push(edi);  // save edi across the call
     __ push(ebx);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ mov(ebx, eax);
-    __ pop(edi);  // restore edi after the call
 
-    // Get the arguments count and untag it.
     __ pop(eax);
     __ SmiUntag(eax);
-
     __ LeaveInternalFrame();
+    // Restore the function to edi.
+    __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
 
-    // Use the global receiver object from the called function as the receiver.
+    // Use the global receiver object from the called function as the
+    // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -509,50 +502,55 @@
     __ bind(&patch_receiver);
     __ mov(Operand(esp, eax, times_4, 0), ebx);
 
-    __ bind(&done);
+    __ jmp(&shift_arguments);
   }
 
-  // 4. Check that the function really is a function.
-  { Label done;
-    __ test(edi, Operand(edi));
-    __ j(not_zero, &done, taken);
-    __ xor_(ebx, Operand(ebx));
-    // CALL_NON_FUNCTION will expect to find the non-function callee on the
-    // expression stack of the caller.  Transfer it from receiver to the
-    // caller's expression stack (and make the first argument the receiver
-    // for CALL_NON_FUNCTION) by decrementing the argument count.
-    __ dec(eax);
-    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-           RelocInfo::CODE_TARGET);
-    __ bind(&done);
-  }
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  __ bind(&non_function);
+  __ mov(Operand(esp, eax, times_4, 0), edi);
+  // Clear edi to indicate a non-function being called.
+  __ xor_(edi, Operand(edi));
 
-  // 5. Shift arguments and return address one slot down on the stack
-  //    (overwriting the receiver).
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  __ bind(&shift_arguments);
   { Label loop;
     __ mov(ecx, eax);
     __ bind(&loop);
     __ mov(ebx, Operand(esp, ecx, times_4, 0));
     __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
     __ dec(ecx);
-    __ j(not_sign, &loop);
+    __ j(not_sign, &loop);  // While non-negative (to copy return address).
     __ pop(ebx);  // Discard copy of return address.
     __ dec(eax);  // One fewer argument (first argument is new receiver).
   }
 
-  // 6. Get the code to call from the function and check that the number of
-  //    expected arguments matches what we're providing.
-  { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(ebx,
-           FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
-    __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-    __ cmp(eax, Operand(ebx));
-    __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  { Label function;
+    __ test(edi, Operand(edi));
+    __ j(not_zero, &function, taken);
+    __ xor_(ebx, Operand(ebx));
+    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+           RelocInfo::CODE_TARGET);
+    __ bind(&function);
   }
 
-  // 7. Jump (tail-call) to the code in register edx without checking arguments.
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register edx without checking arguments.
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx,
+         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ cmp(eax, Operand(ebx));
+  __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
+
   ParameterCount expected(0);
   __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
 }
@@ -647,9 +645,7 @@
   __ mov(eax, Operand(ebp, kIndexOffset));
   __ jmp(&entry);
   __ bind(&loop);
-  __ mov(ecx, Operand(ebp, 2 * kPointerSize));  // load arguments
-  __ push(ecx);
-  __ push(eax);
+  __ mov(edx, Operand(ebp, 2 * kPointerSize));  // Load the arguments object.
 
   // Use inline caching to speed up access to arguments.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -659,8 +655,7 @@
   // we have generated an inline version of the keyed load.  In this
   // case, we know that we are not generating a test instruction next.
 
-  // Remove IC arguments from the stack and push the nth argument.
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  // Push the nth argument.
   __ push(eax);
 
   // Update the index on the stack and in register eax.
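
The shift_arguments loop in the rewritten Generate_FunctionCall (step 4 above) moves every stack slot, return address included, one slot toward the receiver, overwriting the original receiver; the stale top slot is then popped and argc decremented, so the old first argument becomes the new receiver. A C++ rendering of that loop, with index 0 modeling the stack top:

    #include <vector>

    // stack[0] is the return address, stack[1..argc] the arguments (first
    // argument deepest at index argc), stack[argc + 1] the receiver slot.
    void ShiftArguments(std::vector<int>* stack, int* argc) {
      for (int i = *argc; i >= 0; --i) {
        (*stack)[i + 1] = (*stack)[i];  // Deepest slot first, as in the asm.
      }
      stack->erase(stack->begin());  // Pop the stale return-address copy.
      --*argc;  // The old first argument is now the receiver.
    }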
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 31adb56..7ec3ff4 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -103,14 +103,10 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       state_(NULL),
@@ -120,23 +116,21 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 // Calling conventions:
 // ebp: caller's frame pointer
 // esp: stack pointer
 // edi: called JS function
 // esi: callee's context
 
-void CodeGenerator::Generate(FunctionLiteral* fun,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(fun);
-
-  ZoneList<Statement*>* body = fun->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = fun->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -151,7 +145,7 @@
 
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
-      fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
     frame_->SpillAll();
     __ int3();
   }
@@ -177,7 +171,7 @@
       frame_->AllocateStackSlots();
 
       // Allocate the local context if needed.
-      int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
       if (heap_slots > 0) {
         Comment cmnt(masm_, "[ allocate local context");
         // Allocate local context.
@@ -207,7 +201,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -216,15 +209,15 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
             // The use of SlotOperand below is safe in unspilled code
             // because the slot is guaranteed to be a context slot.
             //
             // There are no parameters in the global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());
             frame_->PushParameterAt(i);
             Result value = frame_->Pop();
             value.ToRegister();
@@ -252,9 +245,9 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         frame_->Push(Factory::the_hole_value());
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, ebp, esi,
@@ -272,12 +265,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -292,7 +285,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -303,14 +296,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatements(body);
+      VisitStatements(info->function()->body());
 
       // Handle the return from the function.
       if (has_valid_frame()) {
         // If there is a valid frame, control flow can fall off the end of
         // the body.  In that case there is an implicit return statement.
         ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(fun);
+        CodeForReturnPosition(info->function());
         frame_->PrepareForReturn();
         Result undefined(Factory::undefined_value());
         if (function_return_.is_bound()) {
@@ -353,7 +346,6 @@
   // There is no need to delete the register allocator, it is a
   // stack-allocated local.
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 
@@ -582,7 +574,9 @@
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    Result result =
+        LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    frame()->Push(&result);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -590,13 +584,13 @@
 }
 
 
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
-  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-  ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope()->arguments_shadow() != NULL);
   // We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interferes with the
   // uninitialized const tracking in the context objects.
-  return (scope_->num_heap_slots() > 0)
+  return (scope()->num_heap_slots() > 0)
       ? EAGER_ARGUMENTS_ALLOCATION
       : LAZY_ARGUMENTS_ALLOCATION;
 }
@@ -616,13 +610,13 @@
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     frame_->PushFunction();
     frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    frame_->Push(Smi::FromInt(scope()->num_parameters()));
     Result result = frame_->CallStub(&stub, 3);
     frame_->Push(&result);
   }
 
-  Variable* arguments = scope_->arguments()->var();
-  Variable* shadow = scope_->arguments_shadow()->var();
+  Variable* arguments = scope()->arguments()->var();
+  Variable* shadow = scope()->arguments_shadow()->var();
   ASSERT(arguments != NULL && arguments->slot() != NULL);
   ASSERT(shadow != NULL && shadow->slot() != NULL);
   JumpTarget done;
@@ -631,8 +625,7 @@
     // We have to skip storing into the arguments slot if it has already
     // been written to. This can happen if a function has a local
     // variable named 'arguments'.
-    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
+    Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
       // been assigned a proper value.
@@ -729,35 +722,54 @@
   // The value to convert should be popped from the frame.
   Result value = frame_->Pop();
   value.ToRegister();
-  // Fast case checks.
 
-  // 'false' => false.
-  __ cmp(value.reg(), Factory::false_value());
-  dest->false_target()->Branch(equal);
+  if (value.is_number()) {
+    Comment cmnt(masm_, "ONLY_NUMBER");
+    // Fast case if NumberInfo indicates only numbers.
+    if (FLAG_debug_code) {
+      __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+    }
+    // Smi => false iff zero.
+    ASSERT(kSmiTag == 0);
+    __ test(value.reg(), Operand(value.reg()));
+    dest->false_target()->Branch(zero);
+    __ test(value.reg(), Immediate(kSmiTagMask));
+    dest->true_target()->Branch(zero);
+    __ fldz();
+    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+    __ FCmp();
+    value.Unuse();
+    dest->Split(not_zero);
+  } else {
+    // Fast case checks.
+    // 'false' => false.
+    __ cmp(value.reg(), Factory::false_value());
+    dest->false_target()->Branch(equal);
 
-  // 'true' => true.
-  __ cmp(value.reg(), Factory::true_value());
-  dest->true_target()->Branch(equal);
+    // 'true' => true.
+    __ cmp(value.reg(), Factory::true_value());
+    dest->true_target()->Branch(equal);
 
-  // 'undefined' => false.
-  __ cmp(value.reg(), Factory::undefined_value());
-  dest->false_target()->Branch(equal);
+    // 'undefined' => false.
+    __ cmp(value.reg(), Factory::undefined_value());
+    dest->false_target()->Branch(equal);
 
-  // Smi => false iff zero.
-  ASSERT(kSmiTag == 0);
-  __ test(value.reg(), Operand(value.reg()));
-  dest->false_target()->Branch(zero);
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  dest->true_target()->Branch(zero);
+    // Smi => false iff zero.
+    ASSERT(kSmiTag == 0);
+    __ test(value.reg(), Operand(value.reg()));
+    dest->false_target()->Branch(zero);
+    __ test(value.reg(), Immediate(kSmiTagMask));
+    dest->true_target()->Branch(zero);
 
-  // Call the stub for all other cases.
-  frame_->Push(&value);  // Undo the Pop() from above.
-  ToBooleanStub stub;
-  Result temp = frame_->CallStub(&stub, 1);
-  // Convert the result to a condition code.
-  __ test(temp.reg(), Operand(temp.reg()));
-  temp.Unuse();
-  dest->Split(not_equal);
+    // Call the stub for all other cases.
+    frame_->Push(&value);  // Undo the Pop() from above.
+    ToBooleanStub stub;
+    Result temp = frame_->CallStub(&stub, 1);
+    // Convert the result to a condition code.
+    __ test(temp.reg(), Operand(temp.reg()));
+    temp.Unuse();
+    dest->Split(not_equal);
+  }
 }
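
The number-only fast path above is JavaScript's ToBoolean restricted to numeric values: a smi is false iff it is zero, and a heap number is compared against 0.0 with FCmp, so both signed zeros and NaN (which compares unordered) end up false. As a plain predicate:

    #include <cmath>

    // ToBoolean for a number: false for +0, -0 and NaN, true otherwise.
    bool NumberToBoolean(double value) {
      return value != 0.0 && !std::isnan(value);
    }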
 
 
@@ -797,6 +809,10 @@
   static void LoadAsIntegers(MacroAssembler* masm,
                              bool use_sse3,
                              Label* operand_conversion_failure);
+  // Test if operands are smis or heap numbers and load them
+  // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSSE2Operands(MacroAssembler* masm);
   // Test if operands are numbers (smi or HeapNumber objects), and load
   // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
   // either operand is not a number.  Operands are in edx and eax.
@@ -824,12 +840,13 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "");
+               args_reversed_ ? "_R" : "",
+               NumberInfo::ToString(operands_type_));
   return name_;
 }
 
@@ -979,27 +996,35 @@
     // Neither operand is known to be a string.
   }
 
-  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi_constant =
+      right.is_constant() && !right.handle()->IsSmi();
 
-  if (left_is_smi && right_is_smi) {
+  if (left_is_smi_constant && right_is_smi_constant) {
     // Compute the constant result at compile time, and leave it on the frame.
     int left_int = Smi::cast(*left.handle())->value();
     int right_int = Smi::cast(*right.handle())->value();
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }
 
+  // Get number type of left and right sub-expressions.
+  NumberInfo::Type operands_type =
+      NumberInfo::Combine(left.number_info(), right.number_info());
+
   Result answer;
-  if (left_is_non_smi || right_is_non_smi) {
+  if (left_is_non_smi_constant || right_is_non_smi_constant) {
     // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+    GenericBinaryOpStub stub(op,
+                             overwrite_mode,
+                             NO_SMI_CODE_IN_STUB,
+                             operands_type);
     answer = stub.GenerateCall(masm_, frame_, &left, &right);
-  } else if (right_is_smi) {
+  } else if (right_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
                                         type, false, overwrite_mode);
-  } else if (left_is_smi) {
+  } else if (left_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
                                         type, true, overwrite_mode);
   } else {
@@ -1011,10 +1036,67 @@
     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
     } else {
-      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+      GenericBinaryOpStub stub(op,
+                               overwrite_mode,
+                               NO_GENERIC_BINARY_FLAGS,
+                               operands_type);
       answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
   }
+
+  // Set NumberInfo of result according to the operation performed.
+  // Rely on the fact that smis have a 31 bit payload on ia32.
+  ASSERT(kSmiValueSize == 31);
+  NumberInfo::Type result_type = NumberInfo::kUnknown;
+  switch (op) {
+    case Token::COMMA:
+      result_type = right.number_info();
+      break;
+    case Token::OR:
+    case Token::AND:
+      // Result type can be either of the two input types.
+      result_type = operands_type;
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      // Result is always a number. Smi property of inputs is preserved.
+      result_type = (operands_type == NumberInfo::kSmi)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::SAR:
+      // Result is a smi if we shift by a constant >= 1, otherwise a number.
+      result_type = (right.is_constant() && right.handle()->IsSmi()
+                     && Smi::cast(*right.handle())->value() >= 1)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::SHR:
+      // Result is a smi if we shift by a constant >= 2, otherwise a number.
+      result_type = (right.is_constant() && right.handle()->IsSmi()
+                     && Smi::cast(*right.handle())->value() >= 2)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::ADD:
+      // Result could be a string or a number. Check types of inputs.
+      result_type = NumberInfo::IsNumber(operands_type)
+          ? NumberInfo::kNumber
+          : NumberInfo::kUnknown;
+      break;
+    case Token::SHL:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      // Result is always a number.
+      result_type = NumberInfo::kNumber;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  answer.set_number_info(result_type);
   frame_->Push(&answer);
 }
 
@@ -1856,6 +1938,39 @@
       break;
     }
 
+    case Token::DIV:
+      if (!reversed && int_value == 2) {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        // Check that lowest log2(value) bits of operand are zero, and test
+        // smi tag at the same time.
+        ASSERT_EQ(0, kSmiTag);
+        ASSERT_EQ(1, kSmiTagSize);
+        __ test(operand->reg(), Immediate(3));
+        deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
+        __ sar(operand->reg(), 1);
+        deferred->BindExit();
+        answer = *operand;
+      } else {
+        // Cannot fall through MOD to default case, so we duplicate the
+        // default case here.
+        Result constant_operand(value);
+        if (reversed) {
+          answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                            overwrite_mode);
+        } else {
+          answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                            overwrite_mode);
+        }
+      }
+      break;
     // Generate inline code for mod of powers of 2 and negative powers of 2.
     case Token::MOD:
       if (!reversed &&
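
The new Token::DIV case a few lines up leans on the smi encoding: with kSmiTag == 0 and kSmiTagSize == 1, a smi stores n as n << 1, so testing the low two bits checks "is a smi" and "n is even" in one instruction, and a single arithmetic right shift of the tagged word is already the tagged n / 2. The arithmetic, spelled out:

    #include <cassert>
    #include <cstdint>

    // tagged == (n << 1) encodes the smi n. When the tag bit is clear and n
    // is even, (n << 1) >> 1 == n, which is exactly the encoding of n / 2.
    int32_t SmiDivideByTwo(int32_t tagged) {
      assert((tagged & 3) == 0);  // Smi-tagged and payload even.
      return tagged >> 1;         // Arithmetic shift also covers negatives.
    }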
@@ -2335,6 +2450,7 @@
   // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
   Load(applicand);
+  frame()->Dup();
   Handle<String> name = Factory::LookupAsciiSymbol("apply");
   frame()->Push(name);
   Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@@ -2344,7 +2460,9 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  Result existing_args =
+      LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  frame()->Push(&existing_args);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -2424,8 +2542,8 @@
       __ j(equal, &adapted);
 
       // No arguments adaptor frame. Copy fixed number of arguments.
-      __ mov(eax, Immediate(scope_->num_parameters()));
-      for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ mov(eax, Immediate(scope()->num_parameters()));
+      for (int i = 0; i < scope()->num_parameters(); i++) {
         __ push(frame_->ParameterAt(i));
       }
       __ jmp(&invoke);
@@ -2831,7 +2949,7 @@
   // Leave the frame and return popping the arguments and the
   // receiver.
   frame_->Exit();
-  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
   DeleteFrame();
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -3914,35 +4032,32 @@
   // Spill everything, even constants, to the frame.
   frame_->SpillAll();
 
-  DebuggerStatementStub ces;
-  frame_->CallStub(&ces, 0);
+  frame_->DebugBreak();
   // Ignore the return value.
 #endif
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
   ASSERT(boilerplate->IsBoilerplate());
 
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
+  frame()->SyncRange(0, frame()->element_count() - 1);
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
     FastNewClosureStub stub;
-    frame_->EmitPush(Immediate(boilerplate));
-    Result answer = frame_->CallStub(&stub, 1);
-    frame_->Push(&answer);
+    frame()->EmitPush(Immediate(boilerplate));
+    return frame()->CallStub(&stub, 1);
   } else {
     // Call the runtime to instantiate the function boilerplate
     // object.
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(boilerplate));
-    Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
-    frame_->Push(&result);
+    frame()->EmitPush(esi);
+    frame()->EmitPush(Immediate(boilerplate));
+    return frame()->CallRuntime(Runtime::kNewClosure, 2);
   }
 }
 
@@ -3952,17 +4067,19 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  InstantiateBoilerplate(boilerplate);
+  Result result = InstantiateBoilerplate(boilerplate);
+  frame()->Push(&result);
 }
 
 
 void CodeGenerator::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* node) {
   Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
+  Result result = InstantiateBoilerplate(node->boilerplate());
+  frame()->Push(&result);
 }
 
 
@@ -3998,13 +4115,12 @@
 }
 
 
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  Result result;
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
-
     JumpTarget slow;
     JumpTarget done;
-    Result value;
 
     // Generate fast-case code for variables that might be shadowed by
     // eval-introduced variables.  Eval is used a lot without
@@ -4012,14 +4128,10 @@
     // perform a runtime call for all variables in the scope
     // containing the eval.
     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
       // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) {
-        frame_->Push(&value);
-        return;
-      }
-
-      done.Jump(&value);
+      if (!slow.is_linked()) return result;
+      done.Jump(&result);
 
     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
@@ -4029,21 +4141,21 @@
         // Allocate a fresh register to use as a temp in
         // ContextSlotOperandCheckExtensions and to hold the result
         // value.
-        value = allocator_->Allocate();
-        ASSERT(value.is_valid());
-        __ mov(value.reg(),
+        result = allocator()->Allocate();
+        ASSERT(result.is_valid());
+        __ mov(result.reg(),
                ContextSlotOperandCheckExtensions(potential_slot,
-                                                 value,
+                                                 result,
                                                  &slow));
         if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(value.reg(), Factory::the_hole_value());
-          done.Branch(not_equal, &value);
-          __ mov(value.reg(), Factory::undefined_value());
+          __ cmp(result.reg(), Factory::the_hole_value());
+          done.Branch(not_equal, &result);
+          __ mov(result.reg(), Factory::undefined_value());
         }
         // There is always control flow to slow from
         // ContextSlotOperandCheckExtensions so we have to jump around
         // it.
-        done.Jump(&value);
+        done.Jump(&result);
       }
     }
 
@@ -4051,18 +4163,18 @@
     // A runtime call is inevitable.  We eagerly sync frame elements
     // to memory so that we can push the arguments directly into place
     // on top of the frame.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(slot->var()->name()));
+    frame()->SyncRange(0, frame()->element_count() - 1);
+    frame()->EmitPush(esi);
+    frame()->EmitPush(Immediate(slot->var()->name()));
     if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+      result =
+          frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+      result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    done.Bind(&value);
-    frame_->Push(&value);
+    done.Bind(&result);
+    return result;
 
   } else if (slot->var()->mode() == Variable::CONST) {
     // Const slots may contain 'the hole' value (the constant hasn't been
@@ -4073,19 +4185,21 @@
     // potentially unsafe direct-frame access of SlotOperand.
     VirtualFrame::SpilledScope spilled_scope;
     Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit;
+    Label exit;
     __ mov(ecx, SlotOperand(slot, ecx));
     __ cmp(ecx, Factory::the_hole_value());
-    exit.Branch(not_equal);
+    __ j(not_equal, &exit);
     __ mov(ecx, Factory::undefined_value());
-    exit.Bind();
-    frame_->EmitPush(ecx);
+    __ bind(&exit);
+    return Result(ecx);
 
   } else if (slot->type() == Slot::PARAMETER) {
-    frame_->PushParameterAt(slot->index());
+    frame()->PushParameterAt(slot->index());
+    return frame()->Pop();
 
   } else if (slot->type() == Slot::LOCAL) {
-    frame_->PushLocalAt(slot->index());
+    frame()->PushLocalAt(slot->index());
+    return frame()->Pop();
 
   } else {
     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4094,49 +4208,46 @@
     // The use of SlotOperand below is safe for an unspilled frame
     // because it will always be a context slot.
     ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame_->Push(&temp);
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+    __ mov(result.reg(), SlotOperand(slot, result.reg()));
+    return result;
   }
 }
 
 
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
+Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                    TypeofState state) {
+  Result result = LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
 
   // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Pop the loaded value from the stack.
-  Result value = frame_->Pop();
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
 
   // If the loaded value is a constant, we know if the arguments
   // object has been lazily loaded yet.
-  if (value.is_constant()) {
-    if (value.handle()->IsTheHole()) {
-      Result arguments = StoreArgumentsObject(false);
-      frame_->Push(&arguments);
+  if (result.is_constant()) {
+    if (result.handle()->IsTheHole()) {
+      result.Unuse();
+      return StoreArgumentsObject(false);
     } else {
-      frame_->Push(&value);
+      return result;
     }
-    return;
   }
 
   // The loaded value is in a register. If it is the sentinel that
   // indicates that we haven't loaded the arguments object yet, we
   // need to do it now.
   JumpTarget exit;
-  __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
-  frame_->Push(&value);
-  exit.Branch(not_equal);
-  Result arguments = StoreArgumentsObject(false);
-  frame_->SetElementAt(0, &arguments);
-  exit.Bind();
+  __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+  exit.Branch(not_equal, &result);
+
+  result.Unuse();
+  result = StoreArgumentsObject(false);
+  exit.Bind(&result);
+  return result;
 }
 
 
@@ -4206,8 +4317,6 @@
   // property case was inlined.  Ensure that there is not a test eax
   // instruction here.
   __ nop();
-  // Discard the global object. The result is in answer.
-  frame_->Drop();
   return answer;
 }
 
@@ -4312,7 +4421,8 @@
 
 void CodeGenerator::VisitSlot(Slot* node) {
   Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+  Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+  frame()->Push(&result);
 }
 
 
@@ -4607,106 +4717,214 @@
 }
 
 
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm(), "[ Variable Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  Slot* slot = var->slot();
+  ASSERT(slot != NULL);
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+    frame()->Push(&result);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.
+  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+    CodeForSourcePosition(node->position());
+    StoreToSlot(slot,
+                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+  }
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm(), "[ Named Property Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
+  ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+  // Initialize name and evaluate the receiver subexpression.
+  Handle<String> name;
+  if (var != NULL) {
+    name = var->name();
+    LoadGlobal();
+  } else {
+    Literal* lit = prop->key()->AsLiteral();
+    ASSERT(lit != NULL);
+    name = Handle<String>::cast(lit->handle());
+    Load(prop->obj());
+  }
+
+  if (node->starts_initialization_block()) {
+    // Change to slow case in the beginning of an initialization block to
+    // avoid the quadratic behavior of repeatedly adding fast properties.
+    frame()->Dup();
+    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  if (node->ends_initialization_block()) {
+    // Add an extra copy of the receiver to the frame, so that it can be
+    // converted back to fast case after the assignment.
+    frame()->Dup();
+  }
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    frame()->Dup();
+    Result value = EmitNamedLoad(name, var != NULL);
+    frame()->Push(&value);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(var == NULL || var->mode() != Variable::CONST);
+  ASSERT(node->op() != Token::INIT_CONST);
+  CodeForSourcePosition(node->position());
+  Result answer = EmitNamedStore(name);
+  frame()->Push(&answer);
+
+  if (node->ends_initialization_block()) {
+    // The argument to the runtime call is the extra copy of the receiver,
+    // which is below the value of the assignment.  Swap the receiver and
+    // the value of the assignment expression.
+    Result result = frame()->Pop();
+    Result receiver = frame()->Pop();
+    frame()->Push(&result);
+    frame()->Push(&receiver);
+    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm_, "[ Keyed Property Assignment");
+  Property* prop = node->target()->AsProperty();
+  ASSERT(prop != NULL);
+
+  // Evaluate the receiver subexpression.
+  Load(prop->obj());
+
+  if (node->starts_initialization_block()) {
+    // Change to slow case in the beginning of an initialization block to
+    // avoid the quadratic behavior of repeatedly adding fast properties.
+    frame()->Dup();
+    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  if (node->ends_initialization_block()) {
+    // Add an extra copy of the receiver to the frame, so that it can be
+    // converted back to fast case after the assignment.
+    frame()->Dup();
+  }
+
+  // Evaluate the key subexpression.
+  Load(prop->key());
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // Duplicate receiver and key.
+    frame()->PushElementAt(1);
+    frame()->PushElementAt(1);
+    Result value = EmitKeyedLoad();
+    frame()->Push(&value);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(node->op() != Token::INIT_CONST);
+  CodeForSourcePosition(node->position());
+  Result answer = EmitKeyedStore(prop->key()->type());
+  frame()->Push(&answer);
+
+  if (node->ends_initialization_block()) {
+    // The argument to the runtime call is the extra copy of the receiver,
+    // which is below the value of the assignment.  Swap the receiver and
+    // the value of the assignment expression.
+    Result result = frame()->Pop();
+    Result receiver = frame()->Pop();
+    frame()->Push(&result);
+    frame()->Push(&receiver);
+    Result ignored = frame()->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
 void CodeGenerator::VisitAssignment(Assignment* node) {
 #ifdef DEBUG
-  int original_height = frame_->height();
+  int original_height = frame()->height();
 #endif
-  Comment cmnt(masm_, "[ Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
 
-  { Reference target(this, node->target(), node->is_compound());
-    if (target.is_illegal()) {
-      // Fool the virtual frame into thinking that we left the assignment's
-      // value on the frame.
-      frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  if (var != NULL && !var->is_global()) {
+    EmitSlotAssignment(node);
 
-    if (node->starts_initialization_block()) {
-      ASSERT(target.type() == Reference::NAMED ||
-             target.type() == Reference::KEYED);
-      // Change to slow case in the beginning of an initialization
-      // block to avoid the quadratic behavior of repeatedly adding
-      // fast properties.
+  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+             (var != NULL && var->is_global())) {
+    // Properties whose keys are property names and global variables are
+    // treated as named property references.  We do not need to consider
+    // global 'this' because it is not a valid left-hand side.
+    EmitNamedPropertyAssignment(node);
 
-      // The receiver is the argument to the runtime call.  It is the
-      // first value pushed when the reference was loaded to the
-      // frame.
-      frame_->PushElementAt(target.size() - 1);
-      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-    }
-    if (node->ends_initialization_block()) {
-      // Add an extra copy of the receiver to the frame, so that it can be
-      // converted back to fast case after the assignment.
-      ASSERT(target.type() == Reference::NAMED ||
-             target.type() == Reference::KEYED);
-      if (target.type() == Reference::NAMED) {
-        frame_->Dup();
-        // Dup target receiver on stack.
-      } else {
-        ASSERT(target.type() == Reference::KEYED);
-        Result temp = frame_->Pop();
-        frame_->Dup();
-        frame_->Push(&temp);
-      }
-    }
-    if (node->op() == Token::ASSIGN ||
-        node->op() == Token::INIT_VAR ||
-        node->op() == Token::INIT_CONST) {
-      Load(node->value());
+  } else if (prop != NULL) {
+    // Other properties (including rewritten parameters for a function that
+    // uses arguments) are keyed property assignments.
+    EmitKeyedPropertyAssignment(node);
 
-    } else {  // Assignment is a compound assignment.
-      Literal* literal = node->value()->AsLiteral();
-      bool overwrite_value =
-          (node->value()->AsBinaryOperation() != NULL &&
-           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
-      // There are two cases where the target is not read in the right hand
-      // side, that are easy to test for: the right hand side is a literal,
-      // or the right hand side is a different variable.  TakeValue invalidates
-      // the target, with an implicit promise that it will be written to again
-      // before it is read.
-      if (literal != NULL || (right_var != NULL && right_var != var)) {
-        target.TakeValue();
-      } else {
-        target.GetValue();
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             node->type(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    }
-
-    if (var != NULL &&
-        var->mode() == Variable::CONST &&
-        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-      // Assignment ignored - leave the value on the stack.
-      UnloadReference(&target);
-    } else {
-      CodeForSourcePosition(node->position());
-      if (node->op() == Token::INIT_CONST) {
-        // Dynamic constant initializations must use the function context
-        // and initialize the actual constant declared. Dynamic variable
-        // initializations are simply assignments and use SetValue.
-        target.SetValue(CONST_INIT);
-      } else {
-        target.SetValue(NOT_CONST_INIT);
-      }
-      if (node->ends_initialization_block()) {
-        ASSERT(target.type() == Reference::UNLOADED);
-        // End of initialization block. Revert to fast case.  The
-        // argument to the runtime call is the extra copy of the receiver,
-        // which is below the value of the assignment.
-        // Swap the receiver and the value of the assignment expression.
-        Result lhs = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&lhs);
-        frame_->Push(&receiver);
-        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-      }
-    }
+  } else {
+    // Invalid left-hand side.
+    Load(node->target());
+    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
+    // The runtime call doesn't actually return, but the code generator
+    // still emits code afterwards and expects the frame to end up one
+    // value higher.
+    frame()->Push(&result);
   }
-  ASSERT(frame_->height() == original_height + 1);
+
+  ASSERT(frame()->height() == original_height + 1);
 }
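For orientation, the dispatch above maps left-hand-side forms onto the three emitters roughly as follows (the JavaScript forms are illustrative assumptions, not taken from the patch):

    x = v      x a parameter/local/context slot -> EmitSlotAssignment
    x = v      x a global variable              -> EmitNamedPropertyAssignment
    o.name = v                                  -> EmitNamedPropertyAssignment
    o[key] = v (incl. rewritten 'arguments')    -> EmitKeyedPropertyAssignment
    f() = v    invalid target                   -> Runtime::kThrowReferenceError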
 
 
@@ -4911,9 +5129,9 @@
         LoadGlobalReceiver();
       } else {
         Load(property->obj());
+        frame()->Dup();
         Load(property->key());
-        Result function = EmitKeyedLoad(false);
-        frame_->Drop();  // Key.
+        Result function = EmitKeyedLoad();
         Result receiver = frame_->Pop();
         frame_->Push(&function);
         frame_->Push(&receiver);
@@ -5277,7 +5495,7 @@
   ASSERT(args->length() == 0);
   // ArgumentsAccessStub takes the parameter count as an input argument
   // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
   Result result = frame_->CallStub(&stub, &count);
@@ -5424,7 +5642,7 @@
   Load(args->at(0));
   Result key = frame_->Pop();
   // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   Result result = frame_->CallStub(&stub, &key, &count);
@@ -5535,6 +5753,17 @@
 }
 
 
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  Load(args->at(0));
+  NumberToStringStub stub;
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   if (CheckForInlineRuntimeCall(node)) {
     return;
@@ -5669,7 +5898,6 @@
     switch (op) {
       case Token::SUB: {
         GenericUnaryOpStub stub(Token::SUB, overwrite);
-        // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
         frame_->Push(&answer);
@@ -6285,7 +6513,7 @@
 
 
 // Emit a LoadIC call to get the value from receiver and leave it in
-// dst.  The receiver register is restored after the call.
+// dst.
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   DeferredReferenceGetNamedValue(Register dst,
@@ -6308,7 +6536,9 @@
 
 
 void DeferredReferenceGetNamedValue::Generate() {
-  __ push(receiver_);
+  if (!receiver_.is(eax)) {
+    __ mov(eax, receiver_);
+  }
   __ Set(ecx, Immediate(name_));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ call(ic, RelocInfo::CODE_TARGET);
@@ -6325,7 +6555,6 @@
   __ IncrementCounter(&Counters::named_load_inline_miss, 1);
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(receiver_);
 }
 
 
@@ -6333,9 +6562,8 @@
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
-                                          Register key,
-                                          bool is_global)
-      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+                                          Register key)
+      : dst_(dst), receiver_(receiver), key_(key) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
@@ -6348,14 +6576,29 @@
   Register dst_;
   Register receiver_;
   Register key_;
-  bool is_global_;
 };
 
 
 void DeferredReferenceGetKeyedValue::Generate() {
-  __ push(receiver_);  // First IC argument.
-  __ push(key_);       // Second IC argument.
-
+  if (!receiver_.is(eax)) {
+    // Register eax is available for key.
+    if (!key_.is(eax)) {
+      __ mov(eax, key_);
+    }
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+  } else if (!key_.is(edx)) {
+    // Register edx is available for receiver.
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+    if (!key_.is(eax)) {
+      __ mov(eax, key_);
+    }
+  } else {
+    __ xchg(edx, eax);
+  }
   // Calculate the delta from the IC call instruction to the map check
   // cmp instruction in the inlined version.  This delta is stored in
   // a test(eax, delta) instruction after the call so that we can find
@@ -6363,10 +6606,7 @@
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode mode = is_global_
-                         ? RelocInfo::CODE_TARGET_CONTEXT
-                         : RelocInfo::CODE_TARGET;
-  __ call(ic, mode);
+  __ call(ic, RelocInfo::CODE_TARGET);
   // The delta from the start of the map-compare instruction to the
   // test instruction.  We use masm_-> directly here instead of the __
   // macro because the macro sometimes uses macro expansion to turn
@@ -6379,8 +6619,6 @@
   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(key_);
-  __ pop(receiver_);
 }
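The move sequence above establishes the KeyedLoadIC register convention (key in eax, receiver in edx) without clobbering either value. A plain C++ analogue of the three cases, modeling registers as variables and Register::is as address identity (a sketch, not V8 code):

    #include <utility>

    void PlaceIcOperands(int& eax, int& edx, int& key, int& receiver) {
      if (&receiver != &eax) {       // eax is free for the key
        if (&key != &eax) eax = key;
        if (&receiver != &edx) edx = receiver;
      } else if (&key != &edx) {     // receiver occupies eax; edx is free
        edx = receiver;              // safe: key is in neither eax nor edx
        eax = key;
      } else {                       // receiver in eax and key in edx
        std::swap(eax, edx);         // the xchg case
      }
    }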
 
 
@@ -6432,12 +6670,91 @@
 }
 
 
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
-  // Inline array load code if inside of a loop.  We do not know
-  // the receiver map yet, so we initially generate the code with
-  // a check against an invalid map.  In the inline cache code, we
-  // patch the map check if appropriate.
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Do not inline the inobject property case for loads from the global
+  // object.  Also do not inline for unoptimized code.  This saves time in
+  // the code generator.  Unoptimized code is toplevel code or code that is
+  // not in a loop.
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    frame()->Push(name);
+
+    RelocInfo::Mode mode = is_contextual
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    result = frame()->CallLoadIC(mode);
+    // A test eax instruction following the call signals that the inobject
+    // property case was inlined.  Ensure that there is not a test eax
+    // instruction here.
+    __ nop();
+  } else {
+    // Inline the inobject property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+    // Check that the receiver is a heap object.
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
+
+    __ bind(deferred->patch_site());
+    // This is the map check instruction that will be patched (so we can't
+    // use the double underscore macro that may insert instructions).
+    // Initially use an invalid map to force a failure.
+    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                Immediate(Factory::null_value()));
+    // This branch is always a forwards branch so it's always a fixed size
+    // which allows the assert below to succeed and patching to work.
+    deferred->Branch(not_equal);
+
+    // The delta from the patch label to the load offset must be statically
+    // known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+           LoadIC::kOffsetToLoadInstruction);
+    // The initial (invalid) offset has to be large enough to force a 32-bit
+    // instruction encoding to allow patching with an arbitrary offset.  Use
+    // kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+
+    __ IncrementCounter(&Counters::named_load_inline, 1);
+    deferred->BindExit();
+  }
+  ASSERT(frame()->height() == original_height - 1);
+  return result;
+}
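The inlined fast case is deliberately fixed-shape so the inline cache can patch it in place on the first miss. The emitted sequence looks roughly like this (a sketch; the exact encodings are whatever the assembler produces):

    test receiver, kSmiTagMask         ; smi receiver -> deferred
    jz   deferred
    cmp  [receiver + map], <null>      ; patch site: <null> is replaced with
    jnz  deferred                      ;   the monomorphic map
    mov  result, [receiver + kMaxInt]  ; kMaxInt patched to the real
                                       ;   in-object field offset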
+
+
+Result CodeGenerator::EmitNamedStore(Handle<String> name) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  frame()->Push(name);
+  Result result = frame()->CallStoreIC();
+
+  ASSERT(frame()->height() == original_height - 2);
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Inline array load code if inside of a loop.  We do not know the
+  // receiver map yet, so we initially generate the code with a check
+  // against an invalid map.  In the inline cache code, we patch the map
+  // check if appropriate.
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
@@ -6453,22 +6770,16 @@
 
     // Use a fresh temporary for the index and later the loaded
     // value.
-    Result index = allocator()->Allocate();
-    ASSERT(index.is_valid());
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
 
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(result.reg(),
                                            receiver.reg(),
-                                           key.reg(),
-                                           is_global);
+                                           key.reg());
 
-    // Check that the receiver is not a smi (only needed if this
-    // is not a load from the global context) and that it has the
-    // expected map.
-    if (!is_global) {
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(zero);
-    }
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
 
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
@@ -6493,50 +6804,132 @@
 
     // Shift the key to get the actual index value and check that
     // it is within bounds.
-    __ mov(index.reg(), key.reg());
-    __ SmiUntag(index.reg());
-    __ cmp(index.reg(),
+    __ mov(result.reg(), key.reg());
+    __ SmiUntag(result.reg());
+    __ cmp(result.reg(),
            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
 
-    // Load and check that the result is not the hole.  We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse.  For example, if
-    // one is eax, the we can reuse that one because the value
-    // coming from the deferred code will be in eax.
-    Result value = index;
-    __ mov(value.reg(), Operand(elements.reg(),
-                                index.reg(),
-                                times_4,
-                                FixedArray::kHeaderSize - kHeapObjectTag));
+    // Load and check that the result is not the hole.
+    __ mov(result.reg(), Operand(elements.reg(),
+                                 result.reg(),
+                                 times_4,
+                                 FixedArray::kHeaderSize - kHeapObjectTag));
     elements.Unuse();
-    index.Unuse();
-    __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+    __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
     deferred->BindExit();
-    // Restore the receiver and key to the frame and push the
-    // result on top of it.
-    frame_->Push(&receiver);
-    frame_->Push(&key);
-    return value;
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
-    RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    Result answer = frame_->CallKeyedLoadIC(mode);
+    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load.  The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
     __ nop();
-    return answer;
   }
+  ASSERT(frame()->height() == original_height - 2);
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Generate inlined version of the keyed store if the code is in a loop
+  // and the key is likely to be a smi.
+  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+    Comment cmnt(masm(), "[ Inlined store to keyed Property");
+
+    // Get the receiver, key and value into registers.
+    result = frame()->Pop();
+    Result key = frame()->Pop();
+    Result receiver = frame()->Pop();
+
+    Result tmp = allocator()->Allocate();
+    ASSERT(tmp.is_valid());
+
+    // Determine whether the value is a constant before putting it in a
+    // register.
+    bool value_is_constant = result.is_constant();
+
+    // Make sure that value, key and receiver are in registers.
+    result.ToRegister();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue(result.reg(),
+                                           key.reg(),
+                                           receiver.reg());
+
+    // Check that the value is a smi if it is not a constant.  We can skip
+    // the write barrier for smis and constants.
+    if (!value_is_constant) {
+      __ test(result.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    }
+
+    // Check that the key is a non-negative smi.
+    __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+    deferred->Branch(not_zero);
+
+    // Check that the receiver is not a smi.
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
+
+    // Check that the receiver is a JSArray.
+    __ mov(tmp.reg(),
+           FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+    __ movzx_b(tmp.reg(),
+               FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+    __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+    deferred->Branch(not_equal);
+
+    // Check that the key is within bounds.  Both the key and the length of
+    // the JSArray are smis.
+    __ cmp(key.reg(),
+           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+    deferred->Branch(greater_equal);
+
+    // Get the elements array from the receiver and check that it is not a
+    // dictionary.
+    __ mov(tmp.reg(),
+           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+    // Bind the deferred code patch site to be able to locate the fixed
+    // array map comparison.  When debugging, we patch this comparison to
+    // always fail so that we will hit the IC call in the deferred code
+    // which will allow the debugger to break for fast case stores.
+    __ bind(deferred->patch_site());
+    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    deferred->Branch(not_equal);
+
+    // Store the value.
+    __ mov(Operand(tmp.reg(),
+                   key.reg(),
+                   times_2,
+                   FixedArray::kHeaderSize - kHeapObjectTag),
+           result.reg());
+    __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+    deferred->BindExit();
+  } else {
+    result = frame()->CallKeyedStoreIC();
+    // Make sure that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed store.
+    __ nop();
+    frame()->Drop(2);
+  }
+  ASSERT(frame()->height() == original_height - 3);
+  return result;
 }
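Condensed into plain C++, the inlined store performs the following checks before writing straight into the elements array; each failed check branches to the deferred KeyedStoreIC call instead. The helpers (IsSmi, SmiValue, IsJSArray, FixedArrayMap) are hypothetical stand-ins:

    // Sketch, assuming ia32 smi tagging and the field layouts used above.
    bool TryFastKeyedStore(Object* receiver, Object* key, Object* value,
                           bool value_is_constant) {
      if (!value_is_constant && !IsSmi(value)) return false;  // needs barrier
      if (!IsSmi(key) || SmiValue(key) < 0) return false;     // smi key >= 0
      if (IsSmi(receiver) || !IsJSArray(receiver)) return false;
      JSArray* array = JSArray::cast(receiver);
      if (SmiValue(key) >= SmiValue(array->length())) return false;  // bounds
      FixedArray* elements = FixedArray::cast(array->elements());
      if (elements->map() != FixedArrayMap()) return false;  // dictionary mode
      elements->set(SmiValue(key), value);                   // the store
      return true;
    }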
 
 
@@ -6556,7 +6949,7 @@
   } else {
     Literal* raw_name = property->key()->AsLiteral();
     ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
+    return Handle<String>::cast(raw_name->handle());
   }
 }
 
@@ -6578,7 +6971,10 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      Result result =
+          cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      if (!persist_after_get_) set_unloaded();
+      cgen_->frame()->Push(&result);
       break;
     }
 
@@ -6586,87 +6982,27 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Do not inline the inobject property case for loads from the global
-      // object.  Also do not inline for unoptimized code.  This saves time
-      // in the code generator.  Unoptimized code is toplevel code or code
-      // that is not in a loop.
-      if (is_global ||
-          cgen_->scope()->is_global_scope() ||
-          cgen_->loop_nesting() == 0) {
-        Comment cmnt(masm, "[ Load from named Property");
-        cgen_->frame()->Push(GetName());
-
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallLoadIC(mode);
-        // A test eax instruction following the call signals that the
-        // inobject property case was inlined.  Ensure that there is not
-        // a test eax instruction here.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      } else {
-        // Inline the inobject property case.
-        Comment cmnt(masm, "[ Inlined named property load");
-        Result receiver = cgen_->frame()->Pop();
-        receiver.ToRegister();
-
-        Result value = cgen_->allocator()->Allocate();
-        ASSERT(value.is_valid());
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(value.reg(),
-                                               receiver.reg(),
-                                               GetName());
-
-        // Check that the receiver is a heap object.
-        __ test(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
-
-        __ bind(deferred->patch_site());
-        // This is the map check instruction that will be patched (so we can't
-        // use the double underscore macro that may insert instructions).
-        // Initially use an invalid map to force a failure.
-        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                  Immediate(Factory::null_value()));
-        // This branch is always a forwards branch so it's always a fixed
-        // size which allows the assert below to succeed and patching to work.
-        deferred->Branch(not_equal);
-
-        // The delta from the patch label to the load offset must be
-        // statically known.
-        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-               LoadIC::kOffsetToLoadInstruction);
-        // The initial (invalid) offset has to be large enough to force
-        // a 32-bit instruction encoding to allow patching with an
-        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-        int offset = kMaxInt;
-        masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
-
-        __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit();
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&value);
-      }
+      if (persist_after_get_) cgen_->frame()->Dup();
+      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+      if (!persist_after_get_) set_unloaded();
+      cgen_->frame()->Push(&result);
       break;
     }
 
     case KEYED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      Result value = cgen_->EmitKeyedLoad(is_global);
+      if (persist_after_get_) {
+        cgen_->frame()->PushElementAt(1);
+        cgen_->frame()->PushElementAt(1);
+      }
+      Result value = cgen_->EmitKeyedLoad();
       cgen_->frame()->Push(&value);
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
     default:
       UNREACHABLE();
   }
-
-  if (!persist_after_get_) {
-    cgen_->UnloadReference(this);
-  }
 }
 
 
@@ -6716,14 +7052,13 @@
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
     case NAMED: {
       Comment cmnt(masm, "[ Store to named Property");
-      cgen_->frame()->Push(GetName());
-      Result answer = cgen_->frame()->CallStoreIC();
+      Result answer = cgen_->EmitNamedStore(GetName());
       cgen_->frame()->Push(&answer);
       set_unloaded();
       break;
@@ -6731,108 +7066,16 @@
 
     case KEYED: {
       Comment cmnt(masm, "[ Store to keyed Property");
-
-      // Generate inlined version of the keyed store if the code is in
-      // a loop and the key is likely to be a smi.
       Property* property = expression()->AsProperty();
       ASSERT(property != NULL);
-      StaticType* key_smi_analysis = property->key()->type();
-
-      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
-        Comment cmnt(masm, "[ Inlined store to keyed Property");
-
-        // Get the receiver, key and value into registers.
-        Result value = cgen_->frame()->Pop();
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-
-        Result tmp = cgen_->allocator_->Allocate();
-        ASSERT(tmp.is_valid());
-
-        // Determine whether the value is a constant before putting it
-        // in a register.
-        bool value_is_constant = value.is_constant();
-
-        // Make sure that value, key and receiver are in registers.
-        value.ToRegister();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        DeferredReferenceSetKeyedValue* deferred =
-            new DeferredReferenceSetKeyedValue(value.reg(),
-                                               key.reg(),
-                                               receiver.reg());
-
-        // Check that the value is a smi if it is not a constant.  We
-        // can skip the write barrier for smis and constants.
-        if (!value_is_constant) {
-          __ test(value.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-        }
-
-        // Check that the key is a non-negative smi.
-        __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
-        deferred->Branch(not_zero);
-
-        // Check that the receiver is not a smi.
-        __ test(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
-
-        // Check that the receiver is a JSArray.
-        __ mov(tmp.reg(),
-               FieldOperand(receiver.reg(), HeapObject::kMapOffset));
-        __ movzx_b(tmp.reg(),
-                   FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
-        __ cmp(tmp.reg(), JS_ARRAY_TYPE);
-        deferred->Branch(not_equal);
-
-        // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis.
-        __ cmp(key.reg(),
-               FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-        deferred->Branch(greater_equal);
-
-        // Get the elements array from the receiver and check that it
-        // is not a dictionary.
-        __ mov(tmp.reg(),
-               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-        // Bind the deferred code patch site to be able to locate the
-        // fixed array map comparison.  When debugging, we patch this
-        // comparison to always fail so that we will hit the IC call
-        // in the deferred code which will allow the debugger to
-        // break for fast case stores.
-        __ bind(deferred->patch_site());
-        __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-               Immediate(Factory::fixed_array_map()));
-        deferred->Branch(not_equal);
-
-        // Store the value.
-        __ mov(Operand(tmp.reg(),
-                       key.reg(),
-                       times_2,
-                       FixedArray::kHeaderSize - kHeapObjectTag),
-               value.reg());
-        __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
-        deferred->BindExit();
-
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
-        cgen_->frame()->Push(&value);
-      } else {
-        Result answer = cgen_->frame()->CallKeyedStoreIC();
-        // Make sure that we do not have a test instruction after the
-        // call.  A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed store.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      }
-      cgen_->UnloadReference(this);
+      Result answer = cgen_->EmitKeyedStore(property->key()->type());
+      cgen_->frame()->Push(&answer);
+      set_unloaded();
       break;
     }
 
-    default:
+    case UNLOADED:
+    case ILLEGAL:
       UNREACHABLE();
   }
 }
@@ -6926,6 +7169,13 @@
 
 
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + kPointerSize]: constant elements.
+  // [esp + (2 * kPointerSize)]: literal index.
+  // [esp + (3 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
   int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
   int size = JSArray::kSize + elements_size;
 
@@ -7509,7 +7759,18 @@
     case Token::DIV: {
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+        if (NumberInfo::IsNumber(operands_type_)) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(edx,
+                                "GenericBinaryOpStub operand not a number.");
+            __ AbortIfNotNumber(eax,
+                                "GenericBinaryOpStub operand not a number.");
+          }
+          FloatingPointHelper::LoadSSE2Operands(masm);
+        } else {
+          FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+        }
 
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7522,7 +7783,17 @@
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         GenerateReturn(masm);
       } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        if (NumberInfo::IsNumber(operands_type_)) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(edx,
+                                "GenericBinaryOpStub operand not a number.");
+            __ AbortIfNotNumber(eax,
+                                "GenericBinaryOpStub operand not a number.");
+          }
+        } else {
+          FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        }
         FloatingPointHelper::LoadFloatOperands(
             masm,
             ecx,
@@ -7634,7 +7905,7 @@
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
-      Label not_strings, not_string1, string1;
+      Label not_strings, not_string1, string1, string1_smi2;
       Result answer;
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
@@ -7643,15 +7914,28 @@
 
       // First argument is a string, test second.
       __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &string1);
+      __ j(zero, &string1_smi2);
       __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &string1);
 
       // First and second argument are strings. Jump to the string add stub.
-      StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&stub);
+      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+      __ TailCallStub(&string_add_stub);
 
-      // Only first argument is a string.
+      __ bind(&string1_smi2);
+      // First argument is a string, second is a smi. Try to look up the
+      // number string for the smi in the number string cache.
+      NumberToStringStub::GenerateLookupNumberStringCache(
+          masm, eax, edi, ebx, ecx, true, &string1);
+
+      // Call the string add stub to make the result.
+      __ EnterInternalFrame();
+      __ push(edx);  // Original first argument.
+      __ push(edi);  // Number to string result for second argument.
+      __ CallStub(&string_add_stub);
+      __ LeaveInternalFrame();
+      __ ret(2 * kPointerSize);
+
       __ bind(&string1);
       __ InvokeBuiltin(
           HasArgsReversed() ?
@@ -7993,6 +8277,35 @@
 }
 
 
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+  Label load_smi_edx, load_eax, load_smi_eax, done;
+  // Load operand in edx into xmm0.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_edx);
+  __ SmiUntag(edx);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+
+  __ bind(&load_smi_eax);
+  __ SmiUntag(eax);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
+
+  __ bind(&done);
+}
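Both smi paths untag, convert, and retag. In plain C++ terms each conversion computes (assuming the 32-bit smi encoding with a one-bit zero tag):

    #include <stdint.h>

    double SmiOperandToDouble(int32_t tagged) {
      int32_t value = tagged >> 1;        // SmiUntag: drop the tag bit
      return static_cast<double>(value);  // cvtsi2sd
    }

The SmiTag afterwards merely restores the register so the caller's heap-number overwrite test still sees a smi.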
+
+
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
   Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
@@ -8322,6 +8635,11 @@
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // esp[0] : return address
+  // esp[4] : number of parameters
+  // esp[8] : receiver displacement
+  // esp[12] : function
+
   // The displacement is used for skipping the return address and the
   // frame pointer on the stack. It is the offset of the last
   // parameter (if any) relative to the frame pointer.
@@ -8405,7 +8723,6 @@
   __ add(Operand(edi), Immediate(kPointerSize));
   __ sub(Operand(edx), Immediate(kPointerSize));
   __ dec(ecx);
-  __ test(ecx, Operand(ecx));
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -8753,6 +9070,74 @@
 }
 
 
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                                         Register object,
+                                                         Register result,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         bool object_is_smi,
+                                                         Label* not_found) {
+  // Currently we only look up smis. Check for a smi if the object is not
+  // known to be one.
+  if (!object_is_smi) {
+    ASSERT(kSmiTag == 0);
+    __ test(object, Immediate(kSmiTagMask));
+    __ j(not_zero, not_found);
+  }
+
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch1;
+  Register scratch = scratch2;
+
+  // Load the number string cache.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+  __ mov(number_string_cache,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shr(mask, 1);  // Divide length by two (length is not a smi).
+  __ sub(Operand(mask), Immediate(1));  // Make mask.
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value.
+  __ mov(scratch, object);
+  __ SmiUntag(scratch);
+  __ and_(scratch, Operand(mask));
+  // Check if the entry is the smi we are looking for.
+  __ cmp(object,
+         FieldOperand(number_string_cache,
+                      scratch,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize));
+  __ j(not_equal, not_found);
+
+  // Get the result from the cache.
+  __ mov(result,
+         FieldOperand(number_string_cache,
+                      scratch,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize));
+  __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
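In outline the probe is a single-slot hash lookup: entry i of the cache occupies elements 2*i (the number) and 2*i+1 (its string). A C++ sketch with hypothetical accessors:

    Object* LookupNumberStringCache(FixedArray* cache, int32_t smi_value) {
      int mask = cache->length() / 2 - 1;  // capacity is a power of two
      int index = smi_value & mask;        // the hash of a smi is its value
      if (cache->get(2 * index) != Smi::FromInt(smi_value)) return NULL;
      return cache->get(2 * index + 1);    // the cached string
    }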
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  __ mov(ebx, Operand(esp, kPointerSize));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+  __ ret(1 * kPointerSize);
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
+}
+
+
 void CompareStub::Generate(MacroAssembler* masm) {
   Label call_builtin, done;
 
@@ -9085,6 +9470,9 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
   __ Set(eax, Immediate(argc_));
   __ Set(ebx, Immediate(0));
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
@@ -9658,13 +10046,34 @@
   // ecx: length of second string
   // edx: second string
   // Look at the length of the result of adding the two strings.
-  Label string_add_flat_result;
+  Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
   __ add(ebx, Operand(ecx));
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
   __ cmp(ebx, 2);
-  __ j(equal, &string_add_runtime);
+  __ j(not_equal, &longer_than_two);
+
+  // Check that both strings are non-external ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+                                         &string_add_runtime);
+
+  // Get the two characters forming the sub string.
+  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two-character string in the symbol table. If it is
+  // not found, just allocate a new one.
+  Label make_two_character_string, make_flat_ascii_string;
+  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+                                       &make_two_character_string);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  __ Set(ebx, Immediate(2));
+  __ jmp(&make_flat_ascii_string);
+
+  __ bind(&longer_than_two);
   // Check if resulting string will be flat.
   __ cmp(ebx, String::kMinNonFlatLength);
   __ j(below, &string_add_flat_result);
@@ -9731,7 +10140,10 @@
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &string_add_runtime);
+
+  __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
+  // ebx: length of resulting flat string
   __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
@@ -9888,6 +10300,190 @@
 }
 
 
+void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                          Register c1,
+                                                          Register c2,
+                                                          Register scratch1,
+                                                          Register scratch2,
+                                                          Register scratch3,
+                                                          Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that neither character is a digit, as such strings have a
+  // different hash algorithm. Don't try to look these up in the symbol table.
+  Label not_array_index;
+  __ mov(scratch, c1);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(above, &not_array_index);
+  __ mov(scratch, c2);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(below_equal, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  GenerateHashInit(masm, hash, c1, scratch);
+  GenerateHashAddCharacter(masm, hash, c2, scratch);
+  GenerateHashGetHash(masm, hash, scratch);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ shl(c2, kBitsPerByte);
+  __ or_(chars, Operand(c2));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load the symbol table.
+  Register symbol_table = c2;
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+  __ mov(symbol_table,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  static const int kCapacityOffset =
+      FixedArray::kHeaderSize +
+      SymbolTable::kCapacityIndex * kPointerSize;
+  __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
+  __ SmiUntag(mask);
+  __ sub(Operand(mask), Immediate(1));
+
+  // Registers
+  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:         hash of two character string
+  // symbol_table: symbol table
+  // mask:         capacity mask
+  // scratch:      -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    __ mov(scratch, hash);
+    if (i > 0) {
+      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(mask));
+
+    // Load the entry from the symbol table.
+    Register candidate = scratch;  // Scratch register contains candidate.
+    ASSERT_EQ(1, SymbolTableShape::kEntrySize);
+    static const int kFirstElementOffset =
+        FixedArray::kHeaderSize +
+        SymbolTable::kPrefixStartIndex * kPointerSize +
+        SymbolTableShape::kPrefixSize * kPointerSize;
+    __ mov(candidate,
+           FieldOperand(symbol_table,
+                        scratch,
+                        times_pointer_size,
+                        kFirstElementOffset));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmp(candidate, Factory::undefined_value());
+    __ j(equal, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+    __ j(not_equal, &next_probe[i]);
+
+    // As we are out of registers, save the mask on the stack and use that
+    // register as a temporary.
+    __ push(mask);
+    Register temp = mask;
+
+    // Check that the candidate is a non-external ascii string.
+    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ JumpIfInstanceTypeIsNotSequentialAscii(
+        temp, temp, &next_probe_pop_mask[i]);
+
+    // Check if the two characters match.
+    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ and_(temp, 0x0000ffff);
+    __ cmp(chars, Operand(temp));
+    __ j(equal, &found_in_symbol_table);
+    __ bind(&next_probe_pop_mask[i]);
+    __ pop(mask);
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // The scratch register holds the matching symbol when we reach this label.
+  Register result = scratch;
+  __ bind(&found_in_symbol_table);
+  __ pop(mask);  // Pop the temporarily saved mask from the stack.
+  if (!result.is(eax)) {
+    __ mov(eax, result);
+  }
+}
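Unrolled, the loop above is a bounded open-addressing lookup. The same logic in C++ (hypothetical accessors; ProbeOffset stands in for SymbolTable::GetProbeOffset, with ProbeOffset(0) being zero):

    String* ProbeForTwoCharSymbol(SymbolTable* table, uint32_t hash,
                                  uint16_t chars) {  // c1 | (c2 << 8)
      uint32_t mask = table->capacity() - 1;  // capacity is a power of two
      for (int i = 0; i < 4; i++) {           // kProbes
        uint32_t entry = (hash + ProbeOffset(i)) & mask;
        Object* candidate = table->KeyAt(entry);
        if (IsUndefined(candidate)) return NULL;    // definite miss
        String* s = String::cast(candidate);
        if (s->length() != 2) continue;
        if (!IsSequentialAscii(s)) continue;
        if (FirstTwoChars(s) == chars) return s;    // hit
      }
      return NULL;  // give up after kProbes probes
    }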
+
+
+void StringStubBase::GenerateHashInit(MacroAssembler* masm,
+                                      Register hash,
+                                      Register character,
+                                      Register scratch) {
+  // hash = character + (character << 10);
+  __ mov(hash, character);
+  __ shl(hash, 10);
+  __ add(hash, Operand(character));
+  // hash ^= hash >> 6;
+  __ mov(scratch, hash);
+  __ sar(scratch, 6);
+  __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
+                                              Register hash,
+                                              Register character,
+                                              Register scratch) {
+  // hash += character;
+  __ add(hash, Operand(character));
+  // hash += hash << 10;
+  __ mov(scratch, hash);
+  __ shl(scratch, 10);
+  __ add(hash, Operand(scratch));
+  // hash ^= hash >> 6;
+  __ mov(scratch, hash);
+  __ sar(scratch, 6);
+  __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
+                                         Register hash,
+                                         Register scratch) {
+  // hash += hash << 3;
+  __ mov(scratch, hash);
+  __ shl(scratch, 3);
+  __ add(hash, Operand(scratch));
+  // hash ^= hash >> 11;
+  __ mov(scratch, hash);
+  __ sar(scratch, 11);
+  __ xor_(hash, Operand(scratch));
+  // hash += hash << 15;
+  __ mov(scratch, hash);
+  __ shl(scratch, 15);
+  __ add(hash, Operand(scratch));
+
+  // if (hash == 0) hash = 27;
+  Label hash_not_zero;
+  __ test(hash, Operand(hash));
+  __ j(not_zero, &hash_not_zero);
+  __ mov(hash, Immediate(27));
+  __ bind(&hash_not_zero);
+}
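Taken together the three helpers compute the runtime's running string hash. An equivalent C++ rendering for the two-character case (the assembly uses sar, hence the arithmetic shifts through int32_t):

    #include <stdint.h>

    uint32_t TwoCharStringHash(uint8_t c1, uint8_t c2) {
      uint32_t hash = c1 + (c1 << 10);  // GenerateHashInit
      hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 6);
      hash += c2;                       // GenerateHashAddCharacter
      hash += hash << 10;
      hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 6);
      hash += hash << 3;                // GenerateHashGetHash
      hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 11);
      hash += hash << 15;
      return hash != 0 ? hash : 27;     // zero is reserved
    }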
+
+
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -9908,26 +10504,55 @@
   // eax: string
   // ebx: instance type
   // Calculate length of sub string using the smi values.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // to
+  Label result_longer_than_two;
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
   __ test(ecx, Immediate(kSmiTagMask));
   __ j(not_zero, &runtime);
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // from
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &runtime);
   __ sub(ecx, Operand(edx));
-  // Handle sub-strings of length 2 and less in the runtime system.
+  // Special handling of sub-strings of length 1 and 2. One-character strings
+  // are handled in the runtime system (looked up in the single-character
+  // cache). Two-character strings are looked up in the symbol table.
   __ SmiUntag(ecx);  // Result length is no longer smi.
   __ cmp(ecx, 2);
-  __ j(below_equal, &runtime);
+  __ j(greater, &result_longer_than_two);
+  __ j(less, &runtime);
 
+  // Sub string of length 2 requested.
+  // eax: string
+  // ebx: instance type
+  // ecx: sub string length (value is 2)
+  // edx: from index (smi)
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+  // Get the two characters forming the sub string.
+  __ SmiUntag(edx);  // From index is no longer smi.
+  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx,
+             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to look up the two-character string in the symbol table.
+  Label make_two_character_string;
+  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+                                       &make_two_character_string);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  // Setup registers for allocating the two character string.
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ Set(ecx, Immediate(2));
+
+  __ bind(&result_longer_than_two);
   // eax: string
   // ebx: instance type
   // ecx: result string length
   // Check for flat ascii string
   Label non_ascii_flat;
-  __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
-  __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &non_ascii_flat);
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
 
   // Allocate the result.
   __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
@@ -9975,7 +10600,8 @@
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(Operand(esi),
+         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // As from is a smi it is 2 times the value which matches the size of a two
   // byte character.
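The closing comment leans on the smi encoding: a tagged smi holds its value shifted left by one, so the tagged 'from' index already equals the byte offset of that character in a two-byte string, and no SmiUntag is needed before the add. As a one-line check (an assumption about the encoding, consistent with the SmiUntag uses above):

    // tagged(n) == n << 1 == n * sizeof(uint16_t): the byte offset of char n.
    int32_t TwoByteCharByteOffset(int32_t tagged_from) { return tagged_from; }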
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 956f424..a6cb316 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -305,19 +305,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -352,11 +348,11 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
-  bool is_eval() { return is_eval_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -388,7 +384,7 @@
   void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // Generate the return sequence code.  Should be called no more than
   // once per compiled function, immediately after binding the return
@@ -396,7 +392,7 @@
   void GenerateReturnSequence(Result* return_value);
 
   // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode() const;
+  ArgumentsAllocationMode ArgumentsMode();
 
   // Store the arguments object and allocate it if necessary.
   Result StoreArgumentsObject(bool initial);
@@ -433,8 +429,8 @@
   void LoadAndSpill(Expression* expression);
 
   // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
   Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
@@ -443,10 +439,22 @@
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
 
-  // Load a property of an object, returning it in a Result.
-  // The object and the property name are passed on the stack, and
-  // not changed.
-  Result EmitKeyedLoad(bool is_global);
+  // Support for compiling assignment expressions.
+  void EmitSlotAssignment(Assignment* node);
+  void EmitNamedPropertyAssignment(Assignment* node);
+  void EmitKeyedPropertyAssignment(Assignment* node);
+
+  // Receiver is passed on the frame and consumed.
+  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+  // Reciever and value are passed on the frame and consumed.
+  Result EmitNamedStore(Handle<String> name);
+
+  // Receiver and key are passed on the frame and consumed.
+  Result EmitKeyedLoad();
+
+  // Receiver, key, and value are passed on the frame and consumed.
+  Result EmitKeyedStore(StaticType* key_type);
 
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
@@ -537,7 +545,7 @@
   void DeclareGlobals(Handle<FixedArray> pairs);
 
   // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -584,6 +592,9 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  // Fast support for number to string.
+  void GenerateNumberToString(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -607,15 +618,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-  Handle<Script> script_;
   ZoneList<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   CodeGenState* state_;
@@ -663,13 +673,15 @@
  public:
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
-                      GenericBinaryFlags flags)
+                      GenericBinaryFlags flags,
+                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
       : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL) {
+        name_(NULL),
+        operands_type_(operands_type) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
@@ -694,28 +706,32 @@
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
   char* name_;
+  NumberInfo::Type operands_type_;  // Number type information of operands.
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d)\n",
+    PrintF("GenericBinaryOpStub %d (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n",
+           MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_));
+           static_cast<int>(args_reversed_),
+           NumberInfo::ToString(operands_type_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 10> {};
-  class SSE3Bits: public BitField<bool, 12, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
-  class ArgsReversedBits: public BitField<bool, 14, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+  class ArgsReversedBits: public BitField<bool, 11, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -725,7 +741,8 @@
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_);
+           | ArgsReversedBits::encode(args_reversed_)
+           | NumberInfoBits::encode(operands_type_);
   }
 
   void Generate(MacroAssembler* masm);
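
Narrowing OpBits from 10 to 7 bits is what frees room for the new 3-bit
NumberInfoBits field while keeping the minor key within 16 bits. A
self-contained sketch of how such a BitField layout packs and unpacks,
assuming the usual shift-and-mask definition (field positions mirror the new
layout; the encoded values are placeholders):

    #include <cstdint>
    #include <cstdio>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
    };

    // Field widths mirror the new NNNFRASOOOOOOOMM layout.
    typedef BitField<int, 0, 2>  ModeBits;        // MM
    typedef BitField<int, 2, 7>  OpBits;          // OOOOOOO
    typedef BitField<int, 13, 3> NumberInfoBits;  // NNN

    int main() {
      uint32_t key = ModeBits::encode(1)
                   | OpBits::encode(42)
                   | NumberInfoBits::encode(5);
      std::printf("mode=%d op=%d number_info=%d key=0x%x\n",
                  ModeBits::decode(key), OpBits::decode(key),
                  NumberInfoBits::decode(key), key);
      return 0;
    }
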
@@ -750,13 +767,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
-  NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
-};
-
-
 class StringStubBase: public CodeStub {
  public:
   // Generate code for copying characters using a simple loop. This should only
@@ -779,6 +789,38 @@
                                  Register count,    // Must be ecx.
                                  Register scratch,  // Neither of the above.
                                  bool ascii);
+
+  // Probe the symbol table for a two-character string. If the string is not
+  // found by probing, a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found, the code falls through with the string in register eax.
+  void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                            Register c1,
+                                            Register c2,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* not_found);
+
+  // Generate string hash.
+  void GenerateHashInit(MacroAssembler* masm,
+                        Register hash,
+                        Register character,
+                        Register scratch);
+  void GenerateHashAddCharacter(MacroAssembler* masm,
+                                Register hash,
+                                Register character,
+                                Register scratch);
+  void GenerateHashGetHash(MacroAssembler* masm,
+                           Register hash,
+                           Register scratch);
+};
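
The three hash helpers split a running string hash into init, add-character,
and finalize phases. A plain C++ sketch of that structure, assuming the
Jenkins-style one-at-a-time hash used by V8's runtime string hasher (the
exact constants in the stub may differ):

    #include <cstdint>
    #include <cstdio>

    static uint32_t HashInit(uint32_t c) {
      uint32_t hash = c;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    static uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
      hash += c;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    static uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }

    int main() {
      uint32_t h = HashInit('f');
      h = HashAddCharacter(h, 'o');
      h = HashAddCharacter(h, 'o');
      std::printf("hash(\"foo\") = 0x%x\n", HashGetHash(h));
      return 0;
    }
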
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
 };
 
 
@@ -833,6 +875,39 @@
 };
 
 
+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache, the generated code falls
+  // through with the result in the result register. The object and the result
+  // register can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found with only the content of the register
+  // object unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
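
GenerateLookupNumberStringCache emits the probe of the heap's number string
cache. A toy sketch of the idea, assuming a direct-mapped cache of
(number, string) pairs indexed by the smi's low bits (the real cache also
handles heap numbers and stores tagged values):

    #include <cstdint>
    #include <cstdio>

    static const int kCacheSize = 64;  // illustrative; a power of two

    struct Entry {
      int32_t number;
      char string[16];
      bool used;
    };

    static Entry cache[kCacheSize];

    static const char* NumberToString(int32_t smi) {
      Entry& e = cache[smi & (kCacheSize - 1)];
      if (e.used && e.number == smi) return e.string;  // hit: "fall through"
      // not_found path: do the conversion, then fill the cache slot.
      std::snprintf(e.string, sizeof(e.string), "%d", smi);
      e.number = smi;
      e.used = true;
      return e.string;
    }

    int main() {
      std::printf("%s (miss), %s (hit)\n",
                  NumberToString(42), NumberToString(42));
      return 0;
    }
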
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 1f34b30..a9e2626 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -125,9 +125,10 @@
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
 }
 
 
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index cb500d5..a3b7016 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1014,7 +1014,6 @@
           int mod, regop, rm;
           get_modrm(*data, &mod, &regop, &rm);
           const char* mnem = NULL;
-          printf("%d\n", regop);
           switch (regop) {
             case 5:  mnem = "subb"; break;
             case 7:  mnem = "cmpb"; break;
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 2a15733..9bab75a 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -35,79 +35,152 @@
 
 #define __ ACCESS_MASM(masm())
 
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return eax; }
+Register FastCodeGenerator::accumulator1() { return edx; }
+Register FastCodeGenerator::scratch0() { return ecx; }
+Register FastCodeGenerator::scratch1() { return edi; }
+Register FastCodeGenerator::receiver_reg() { return ebx; }
+Register FastCodeGenerator::context_reg() { return esi; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
   // Offset 2 is due to return address and saved frame pointer.
   int index = 2 + function()->scope()->num_parameters();
-  __ mov(reg, Operand(ebp, index * kPointerSize));
+  __ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
 }
 
 
-void FastCodeGenerator::EmitReceiverMapCheck() {
-  Comment cmnt(masm(), ";; MapCheck(this)");
-  if (FLAG_print_ir) {
-    PrintF("MapCheck(this)\n");
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(!destination().is(no_reg));
+  ASSERT(cell->IsJSGlobalPropertyCell());
+
+  __ mov(destination(), Immediate(cell));
+  __ mov(destination(),
+         FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ cmp(destination(), Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  EmitLoadReceiver(edx);
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
-  Handle<Map> map(object->map());
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
-  __ j(not_equal, bailout());
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are esi (context) and possibly edx (this).  Both are also
-  // saved in the stack and esi is preserved by the call.
-  __ push(CodeGenerator::GlobalObject());
-  __ mov(ecx, name);
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(edx);
-  } else {
-    __ nop();  // Not test eax, indicates IC has no inlined code at call site.
-  }
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
-  // Negative offsets are inobject properties.
+  // We will emit the write barrier unless the stored value is statically
+  // known to be a smi.
+  bool needs_write_barrier = !is_smi(accumulator0());
+
+  // Perform the store.  Negative offsets are inobject properties.
   if (offset < 0) {
     offset += map->instance_size();
-    __ mov(ecx, edx);  // Copy receiver for write barrier.
+    __ mov(FieldOperand(receiver_reg(), offset), accumulator0());
+    if (needs_write_barrier) {
+      // Preserve receiver from write barrier.
+      __ mov(scratch0(), receiver_reg());
+    }
   } else {
     offset += FixedArray::kHeaderSize;
-    __ mov(ecx, FieldOperand(edx, JSObject::kPropertiesOffset));
+    __ mov(scratch0(),
+           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(scratch0(), offset), accumulator0());
   }
-  // Perform the store.
-  __ mov(FieldOperand(ecx, offset), eax);
-  // Preserve value from write barrier in case it's needed.
-  __ mov(ebx, eax);
-  __ RecordWrite(ecx, offset, ebx, edi);
+
+  if (needs_write_barrier) {
+    if (destination().is(no_reg)) {
+      // After RecordWrite accumulator0 is only accidentally a smi, but it
+      // is already marked as not known to be one.
+      __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
+    } else {
+      // Copy the value to the other accumulator to preserve a copy from the
+      // write barrier. One of the accumulators is available as a scratch
+      // register.  Neither is a smi.
+      __ mov(accumulator1(), accumulator0());
+      clear_as_smi(accumulator1());
+      Register value_scratch = other_accumulator(destination());
+      __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
+    }
+  } else if (destination().is(accumulator1())) {
+    __ mov(accumulator1(), accumulator0());
+    // Known to be a smi because the write barrier was not needed.
+    set_as_smi(accumulator1());
+  }
 }
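
The write-barrier elision above rests on a simple rule: a generational
collector only needs to remember stores of heap pointers, and a smi is an
immediate value, never a pointer. A sketch under V8's ia32 tagging (low bit
clear for smis; the heap-object value below is illustrative):

    #include <cstdint>
    #include <cstdio>

    static const intptr_t kSmiTagMask = 1;  // low bit clear = smi

    // Only stores of heap pointers can create references the GC must
    // remember; an immediate smi never points into the heap.
    static bool NeedsWriteBarrier(intptr_t value) {
      return (value & kSmiTagMask) != 0;
    }

    int main() {
      intptr_t smi = 42 << 1;         // tagged smi
      intptr_t heap_object = 0x1001;  // tagged pointer (low bit set)
      std::printf("smi: %d, heap object: %d\n",
                  NeedsWriteBarrier(smi), NeedsWriteBarrier(heap_object));
      return 0;
    }
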
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  ASSERT(!destination().is(no_reg));
+  LookupResult lookup;
+  info()->receiver()->Lookup(*name, &lookup);
+
+  ASSERT(lookup.holder() == *info()->receiver());
+  ASSERT(lookup.type() == FIELD);
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+  int index = lookup.GetFieldIndex() - map->inobject_properties();
+  int offset = index * kPointerSize;
+
+  // Perform the load.  Negative offsets are inobject properties.
+  if (offset < 0) {
+    offset += map->instance_size();
+    __ mov(destination(), FieldOperand(receiver_reg(), offset));
+  } else {
+    offset += FixedArray::kHeaderSize;
+    __ mov(scratch0(),
+           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ mov(destination(), FieldOperand(scratch0(), offset));
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+    // If both operands are known to be a smi then there is no need to check
+    // the operands or result.  There is no need to perform the operation in
+    // an effect context.
+    if (!destination().is(no_reg)) {
+      // Leave the result in the destination register.  Bitwise or is
+      // commutative.
+      __ or_(destination(), Operand(other_accumulator(destination())));
+    }
+  } else if (destination().is(no_reg)) {
+    // Result is not needed but do not clobber the operands in case of
+    // bailout.
+    __ mov(scratch0(), accumulator1());
+    __ or_(scratch0(), Operand(accumulator0()));
+    __ test(scratch0(), Immediate(kSmiTagMask));
+    __ j(not_zero, bailout(), not_taken);
+  } else {
+    // Preserve the destination operand in a scratch register in case of
+    // bailout.
+    __ mov(scratch0(), destination());
+    __ or_(destination(), Operand(other_accumulator(destination())));
+    __ test(destination(), Immediate(kSmiTagMask));
+    __ j(not_zero, bailout(), not_taken);
+  }
+
+  // If we didn't bail out, the result (in fact, both inputs too) is known to
+  // be a smi.
+  set_as_smi(accumulator0());
+  set_as_smi(accumulator1());
+}
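
EmitBitOr leans on two smi-tag facts at once: or-ing the operands and testing
the tag bit validates both in a single test, and the bitwise-or of two tagged
smis is already the correctly tagged result. A sketch under the standard
encoding:

    #include <cassert>
    #include <cstdint>

    static const intptr_t kSmiTagMask = 1;  // smis have a clear low bit

    // Or-ing the operands and testing the tag bit checks both at once.
    static bool BothSmis(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      intptr_t three = 3 << 1, five = 5 << 1;    // tagged smis
      assert(BothSmis(three, five));
      assert((three | five) == ((3 | 5) << 1));  // or of smis encodes 3 | 5
      assert(!BothSmis(three, 0x1001));          // a heap pointer fails
      return 0;
    }
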
+
+
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -118,18 +191,42 @@
   // Note that we keep a live register reference to esi (context) at this
   // point.
 
-  // Receiver (this) is allocated to edx if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  // Receiver (this) is allocated to a fixed register.
+  if (info()->has_this_properties()) {
+    Comment cmnt(masm(), ";; MapCheck(this)");
+    if (FLAG_print_ir) {
+      PrintF("#: MapCheck(this)\n");
+    }
+    ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+    Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+    Handle<Map> map(object->map());
+    EmitLoadReceiver();
+    __ CheckMap(receiver_reg(), map, bailout(), false);
+  }
 
-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object is the
+  // same as at lazy-compilation time.
+  if (info()->has_globals()) {
+    Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+    if (FLAG_print_ir) {
+      PrintF("#: MapCheck(GLOBAL)\n");
+    }
+    ASSERT(info()->has_global_object());
+    Handle<Map> map(info()->global_object()->map());
+    __ mov(scratch0(), CodeGenerator::GlobalObject());
+    __ CheckMap(scratch0(), map, bailout(), true);
+  }
+
+  VisitStatements(function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
+  if (FLAG_print_ir) {
+    PrintF("#: Return(<undefined>)\n");
+  }
   __ mov(eax, Factory::undefined_value());
-
-  Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  __ ret((scope()->num_parameters() + 1) * kPointerSize);
 
   __ bind(&bailout_);
 }
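
Both prologue guards follow the same pattern: the fast-mode code is only
valid for the receiver map (and global object map) observed when compilation
was triggered, so any other map must take the bailout. A toy sketch of that
invariant:

    #include <cstdio>

    struct Map { int id; };
    struct Object { const Map* map; };

    // The emitted CheckMap compares the object's map word against the map
    // recorded at compile time and jumps to the bailout otherwise.
    static bool FastPathApplies(const Object& obj, const Map* expected) {
      return obj.map == expected;
    }

    int main() {
      Map m1 = {1}, m2 = {2};
      Object receiver = {&m1};
      std::printf("same map: %d\n", FastPathApplies(receiver, &m1));  // fast
      std::printf("new map:  %d\n", FastPathApplies(receiver, &m2));  // bail
      return 0;
    }
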
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 9f9ac56..2394bed 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -51,9 +51,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
     __ push(ebp);  // Caller's frame pointer.
@@ -62,7 +63,7 @@
     __ push(edi);  // Callee's JS Function.
 
     { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = fun->scope()->num_stack_slots();
+      int locals_count = scope()->num_stack_slots();
       if (locals_count == 1) {
         __ push(Immediate(Factory::undefined_value()));
       } else if (locals_count > 1) {
@@ -76,7 +77,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in edi.
       __ push(edi);
@@ -87,9 +88,9 @@
       __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
 
       // Copy parameters into context if necessary.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                      (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@
       }
     }
 
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -117,10 +118,11 @@
         __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
-                          fun->num_parameters() * kPointerSize));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ lea(edx,
+             Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
       __ push(edx);
-      __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+      __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
       // The stub will rewrite receiver and parameter count if the previous
@@ -130,13 +132,13 @@
       __ mov(ecx, eax);  // Duplicate result.
       Move(arguments->slot(), eax, ebx, edx);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, ecx, ebx, edx);
     }
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -156,14 +158,14 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
   { Comment cmnt(masm_, "[ return <undefined>;");
     // Emit a 'return undefined' in case control fell off the end of the body.
     __ mov(eax, Factory::undefined_value());
-    EmitReturnSequence(function_->end_position());
+    EmitReturnSequence(function()->end_position());
   }
 }
 
@@ -190,7 +192,7 @@
     // patch with the code required by the debugger.
     __ mov(esp, ebp);
     __ pop(ebp);
-    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Check that the size of the code used for returning matches what is
     // expected by the debugger.
@@ -627,7 +629,7 @@
       return Operand(ebp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -686,7 +688,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ mov(ebx,
@@ -764,7 +766,7 @@
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(pairs));
-  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -775,7 +777,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
@@ -806,7 +808,7 @@
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in ecx and the global
     // object on the stack.
-    __ push(CodeGenerator::GlobalObject());
+    __ mov(eax, CodeGenerator::GlobalObject());
     __ mov(ecx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -815,7 +817,7 @@
     // Remember that the assembler may choose to do peephole optimization
     // (eg, push/pop elimination).
     __ nop();
-    DropAndApply(1, context, eax);
+    Apply(context, eax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Comment cmnt(masm_, "Lookup slot");
@@ -843,7 +845,7 @@
 
     // Load the object.
     MemOperand object_loc = EmitSlotSearch(object_slot, eax);
-    __ push(object_loc);
+    __ mov(edx, object_loc);
 
     // Assert that the key is a smi.
     Literal* key_literal = property->key()->AsLiteral();
@@ -851,7 +853,7 @@
     ASSERT(key_literal->handle()->IsSmi());
 
     // Load the key.
-    __ push(Immediate(key_literal->handle()));
+    __ mov(eax, Immediate(key_literal->handle()));
 
     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -860,7 +862,7 @@
     // call. It is treated specially by the LoadIC code.
     __ nop();
     // Drop key and object left on the stack by IC.
-    DropAndApply(2, context, eax);
+    Apply(context, eax);
   }
 }
 
@@ -1011,6 +1013,99 @@
 }
 
 
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForValue(prop->obj(), kAccumulator);
+        __ push(result_register());
+      } else {
+        VisitForValue(prop->obj(), kStack);
+      }
+      break;
+    case KEYED_PROPERTY:
+      if (expr->is_compound()) {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(edx, Operand(esp, 0));
+        __ push(eax);
+      } else {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kStack);
+      }
+      break;
+  }
+
+  // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: Apply operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
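
For reference, the evaluation order VisitAssignment implements for a compound
keyed assignment (JavaScript's o[k] += v), written out as straight-line C++
with a std::map standing in for the receiver:

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, int> o;
      o["k"] = 40;
      // o[k] += v, in the order the visitor emits it:
      std::string key = "k";         // 1. evaluate receiver and key once
      int old_value = o[key];        // 2. compound: load the current value
      int rhs = 2;                   // 3. evaluate the right-hand side
      int result = old_value + rhs;  // 4. apply the binary operator
      o[key] = result;               // 5. store through the saved reference
      std::printf("o[k] = %d\n", o["k"]);  // 42
      return 0;
    }
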
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
@@ -1181,18 +1276,16 @@
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
-  // Evaluate the receiver.
-  VisitForValue(expr->obj(), kStack);
-
   if (key->IsPropertyName()) {
+    VisitForValue(expr->obj(), kAccumulator);
     EmitNamedPropertyLoad(expr);
-    // Drop receiver left on the stack by IC.
-    DropAndApply(1, context_, eax);
+    Apply(context_, eax);
   } else {
-    VisitForValue(expr->key(), kStack);
+    VisitForValue(expr->obj(), kStack);
+    VisitForValue(expr->key(), kAccumulator);
+    __ pop(edx);
     EmitKeyedPropertyLoad(expr);
-    // Drop key and receiver left on the stack by IC.
-    DropAndApply(2, context_, eax);
+    Apply(context_, eax);
   }
 }
 
@@ -1263,25 +1356,31 @@
       // Call to a keyed property, use keyed load IC followed by function
       // call.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
       // Record source code position for IC call.
       SetSourcePosition(prop->position());
+      if (prop->is_synthetic()) {
+        __ pop(edx);  // We do not need to keep the receiver.
+      } else {
+        __ mov(edx, Operand(esp, 0));  // Keep receiver, to call function on.
+      }
+
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // By emitting a nop we make sure that we do not have a "test eax,..."
       // instruction after the call, as it is treated specially by the LoadIC
       // code.
       __ nop();
-      // Drop key left on the stack by IC.
-      __ Drop(1);
-      // Pop receiver.
-      __ pop(ebx);
-      // Push result (function).
-      __ push(eax);
-      // Push receiver object on stack.
       if (prop->is_synthetic()) {
+        // Push result (function).
+        __ push(eax);
+        // Push Global receiver.
         __ mov(ecx, CodeGenerator::GlobalObject());
         __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
       } else {
+        // Pop receiver.
+        __ pop(ebx);
+        // Push result (function).
+        __ push(eax);
         __ push(ebx);
       }
       EmitCallWithStub(expr);
@@ -1453,13 +1552,13 @@
           !proxy->var()->is_this() &&
           proxy->var()->is_global()) {
         Comment cmnt(masm_, "Global variable");
-        __ push(CodeGenerator::GlobalObject());
+        __ mov(eax, CodeGenerator::GlobalObject());
         __ mov(ecx, Immediate(proxy->name()));
         Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
         // Use a regular load, not a contextual load, to avoid a reference
         // error.
         __ call(ic, RelocInfo::CODE_TARGET);
-        __ mov(Operand(esp, 0), eax);
+        __ push(eax);
       } else if (proxy != NULL &&
                  proxy->var()->slot() != NULL &&
                  proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -1563,11 +1662,16 @@
     if (expr->is_postfix() && context_ != Expression::kEffect) {
       __ push(Immediate(Smi::FromInt(0)));
     }
-    VisitForValue(prop->obj(), kStack);
     if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForValue(prop->key(), kStack);
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(edx, Operand(esp, 0));
+      __ push(eax);
       EmitKeyedPropertyLoad(prop);
     }
   }
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 44dae3b..fcc8271 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -50,28 +50,29 @@
 // or if name is not a symbol, and will jump to the miss_label in that case.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss_label,
+                                   Register receiver,
+                                   Register name,
                                    Register r0,
                                    Register r1,
                                    Register r2,
-                                   Register name,
                                    DictionaryCheck check_dictionary) {
   // Register use:
   //
+  // name - holds the name of the property and is unchanged.
+  // receiver - holds the receiver and is unchanged.
+  // Scratch registers:
   // r0   - used to hold the property dictionary.
   //
-  // r1   - initially the receiver
-  //      - used for the index into the property dictionary
+  // r1   - used for the index into the property dictionary
   //      - holds the result on exit.
   //
   // r2   - used to hold the capacity of the property dictionary.
-  //
-  // name - holds the name of the property and is unchanged.
 
   Label done;
 
   // Check for the absence of an interceptor.
   // Load the map into r0.
-  __ mov(r0, FieldOperand(r1, JSObject::kMapOffset));
+  __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
   // Test the has_named_interceptor bit in the map.
   __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
           Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
@@ -91,7 +92,7 @@
   __ j(equal, miss_label, not_taken);
 
   // Load properties array.
-  __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
 
   // Check that the properties array is a dictionary.
   if (check_dictionary == CHECK_DICTIONARY) {
@@ -176,14 +177,12 @@
 
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-
   StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -192,15 +191,13 @@
 
 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-
-  StubCompiler::GenerateLoadStringLength(masm, eax, edx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
@@ -208,14 +205,12 @@
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-
   StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -224,26 +219,22 @@
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label slow, check_string, index_int, index_string;
   Label check_pixel_array, probe_dictionary;
 
-  // Load name and receiver.
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
   // Check that the object isn't a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
+  __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
 
   // Get the map of the receiver.
-  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
 
   // Check bit field.
-  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(kSlowCaseBitFieldMask));
   __ j(not_zero, &slow, not_taken);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -251,56 +242,58 @@
   // we enter the runtime system to make sure that indexing
   // into string objects works as intended.
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
-  __ cmp(edx, JS_OBJECT_TYPE);
-  __ j(less, &slow, not_taken);
+  __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
+  __ j(below, &slow, not_taken);
   // Check that the key is a smi.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &check_string, not_taken);
-  __ sar(eax, kSmiTagSize);
+  __ mov(ebx, eax);
+  __ SmiUntag(ebx);
   // Get the elements array of the object.
   __ bind(&index_int);
-  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &check_pixel_array);
+  __ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
   // Check that the key (index) is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
   // Fast case: Do the load.
-  __ mov(eax,
-         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+  __ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
+  __ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, &slow);
+  __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
   __ ret(0);
 
-  // Check whether the elements is a pixel array.
-  // eax: untagged index
-  // ecx: elements array
   __ bind(&check_pixel_array);
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::pixel_array_map()));
-  __ j(not_equal, &slow);
-  __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+  // Check whether the elements is a pixel array.
+  // edx: receiver
+  // ebx: untagged index
+  // eax: key
+  // ecx: elements
+  __ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
+  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
-  __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
-  __ shl(eax, kSmiTagSize);
+  __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
+  __ SmiTag(eax);
   __ ret(0);
 
-  // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
+  // Slow case: jump to runtime.
+  // edx: receiver
+  // eax: key
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_string);
   // The key is not a smi.
   // Is it a string?
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  // edx: receiver
+  // eax: key
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
   __ j(above_equal, &slow);
   // Is the string an array index, with cached numeric value?
   __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
@@ -308,55 +301,58 @@
   __ j(not_zero, &index_string, not_taken);
 
   // Is the string a symbol?
-  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   ASSERT(kSymbolTag != 0);
   __ test(ebx, Immediate(kIsSymbolMask));
   __ j(zero, &slow, not_taken);
 
   // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary leaving result in ecx.
-  __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
+  // cache. Otherwise probe the dictionary.
+  __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
   __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(Factory::hash_table_map()));
   __ j(equal, &probe_dictionary);
 
   // Load the map of the receiver, compute the keyed lookup cache hash
   // based on 32 bits of the map pointer and the string hash.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, ebx);
-  __ shr(edx, KeyedLookupCache::kMapHashShift);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ shr(eax, String::kHashShift);
-  __ xor_(edx, Operand(eax));
-  __ and_(edx, KeyedLookupCache::kCapacityMask);
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ mov(ecx, ebx);
+  __ shr(ecx, KeyedLookupCache::kMapHashShift);
+  __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
+  __ shr(edi, String::kHashShift);
+  __ xor_(ecx, Operand(edi));
+  __ and_(ecx, KeyedLookupCache::kCapacityMask);
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
   ExternalReference cache_keys
       = ExternalReference::keyed_lookup_cache_keys();
-  __ mov(edi, edx);
+  __ mov(edi, ecx);
   __ shl(edi, kPointerSizeLog2 + 1);
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
   __ add(Operand(edi), Immediate(kPointerSize));
-  __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
-  __ cmp(edi, Operand(esp, kPointerSize));
+  __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
   // Get field offset and check that it is an in-object property.
+  // edx     : receiver
+  // ebx     : receiver's map
+  // eax     : key
+  // ecx     : lookup cache index
   ExternalReference cache_field_offsets
       = ExternalReference::keyed_lookup_cache_field_offsets();
-  __ mov(eax,
-         Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
-  __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ cmp(eax, Operand(edx));
+  __ mov(edi,
+         Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+  __ cmp(edi, Operand(ecx));
   __ j(above_equal, &slow);
 
   // Load in-object property.
-  __ sub(eax, Operand(edx));
-  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(eax, Operand(edx));
-  __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
+  __ sub(edi, Operand(ecx));
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(ecx, Operand(edi));
+  __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ ret(0);
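
The cache index above is computed by mixing bits of the receiver's map
pointer with the key's hash field. A sketch of that computation; the shift
amounts and capacity are illustrative stand-ins for the KeyedLookupCache
constants, and the real cache stores (map, symbol) keys next to field
offsets:

    #include <cstdint>
    #include <cstdio>

    static const int kMapHashShift = 2;            // illustrative
    static const int kHashShift = 2;               // illustrative
    static const uint32_t kCapacityMask = 64 - 1;  // power-of-two capacity

    static uint32_t CacheIndex(uintptr_t map_word, uint32_t hash_field) {
      uint32_t hash = static_cast<uint32_t>(map_word) >> kMapHashShift;
      hash ^= hash_field >> kHashShift;  // fold in the string hash
      return hash & kCapacityMask;
    }

    int main() {
      std::printf("index = %u\n", CacheIndex(0xdeadbeef, 0x12345678u));
      return 0;
    }
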
 
   // Do a quick inline probe of the receiver's dictionary, if it
@@ -364,13 +360,14 @@
   __ bind(&probe_dictionary);
   GenerateDictionaryLoad(masm,
                          &slow,
-                         ebx,
-                         ecx,
                          edx,
                          eax,
+                         ebx,
+                         ecx,
+                         edi,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
-  __ mov(eax, Operand(ecx));
+  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx);
+  __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
 
@@ -381,51 +378,47 @@
   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   __ bind(&index_string);
-  __ mov(eax, Operand(ebx));
-  __ and_(eax, String::kArrayIndexHashMask);
-  __ shr(eax, String::kHashShift);
+  __ and_(ebx, String::kArrayIndexHashMask);
+  __ shr(ebx, String::kHashShift);
   __ jmp(&index_int);
 }
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss, index_ok;
 
   // Pop return address.
   // Performing the load early is better in the common case.
-  __ pop(eax);
+  __ pop(ebx);
 
-  __ mov(ebx, Operand(esp, 1 * kPointerSize));
-  __ test(ebx, Immediate(kSmiTagMask));
+  __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &miss);
-  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ test(ecx, Immediate(kIsNotStringMask));
   __ j(not_zero, &miss);
 
   // Check if key is a smi or a heap number.
-  __ mov(edx, Operand(esp, 0));
-  __ test(edx, Immediate(kSmiTagMask));
+  __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &index_ok);
-  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(ecx, Factory::heap_number_map());
   __ j(not_equal, &miss);
 
   __ bind(&index_ok);
-  // Duplicate receiver and key since they are expected on the stack after
-  // the KeyedLoadIC call.
-  __ push(ebx);  // receiver
-  __ push(edx);  // key
-  __ push(eax);  // return address
+  // Push receiver and key on the stack, and make a tail call.
+  __ push(edx);  // receiver
+  __ push(eax);  // key
+  __ push(ebx);  // return address
   __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
 
   __ bind(&miss);
-  __ push(eax);
+  __ push(ebx);
   GenerateMiss(masm);
 }
 
@@ -433,18 +426,14 @@
 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label slow, failed_allocation;
 
-  // Load name and receiver.
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
   // Check that the object isn't a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
+  __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
 
   // Check that the key is a smi.
@@ -452,59 +441,56 @@
   __ j(not_zero, &slow, not_taken);
 
   // Get the map of the receiver.
-  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to check this explicitly since this generic stub does not perform
   // map checks.
-  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow, not_taken);
 
-  // Get the instance type from the map of the receiver.
-  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
-  // Check that the object is a JS object.
-  __ cmp(edx, JS_OBJECT_TYPE);
+  __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
   __ j(not_equal, &slow, not_taken);
 
   // Check that the elements array is the appropriate type of
   // ExternalArray.
-  // eax: index (as a smi)
-  // ecx: JSObject
-  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
   Handle<Map> map(Heap::MapForExternalArrayType(array_type));
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(map));
   __ j(not_equal, &slow, not_taken);
 
+  // eax: key, known to be a smi.
+  // edx: receiver, known to be a JSObject.
+  // ebx: elements object, known to be an external array.
   // Check that the index is in range.
-  __ sar(eax, kSmiTagSize);  // Untag the index.
-  __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  __ mov(ecx, eax);
+  __ SmiUntag(ecx);  // Untag the index.
+  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ j(above_equal, &slow);
 
-  // eax: untagged index
-  // ecx: elements array
-  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
-  // ecx: base pointer of external storage
+  __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+  // ebx: base pointer of external storage
   switch (array_type) {
     case kExternalByteArray:
-      __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+      __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
       break;
     case kExternalUnsignedByteArray:
-      __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+      __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
       break;
     case kExternalShortArray:
-      __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+      __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
       break;
     case kExternalUnsignedShortArray:
-      __ movzx_w(eax, Operand(ecx, eax, times_2, 0));
+      __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
-      __ mov(eax, Operand(ecx, eax, times_4, 0));
+      __ mov(ecx, Operand(ebx, ecx, times_4, 0));
       break;
     case kExternalFloatArray:
-      __ fld_s(Operand(ecx, eax, times_4, 0));
+      __ fld_s(Operand(ebx, ecx, times_4, 0));
       break;
     default:
       UNREACHABLE();
@@ -512,7 +498,7 @@
   }
 
   // For integer array types:
-  // eax: value
+  // ecx: value
   // For floating-point array type:
   // FP(0): value
 
@@ -523,21 +509,19 @@
     // it to a HeapNumber.
     Label box_int;
     if (array_type == kExternalIntArray) {
-      // See Smi::IsValid for why this works.
-      __ mov(ebx, eax);
-      __ add(Operand(ebx), Immediate(0x40000000));
-      __ cmp(ebx, 0x80000000);
-      __ j(above_equal, &box_int);
+      __ cmp(ecx, 0xC0000000);
+      __ j(sign, &box_int);
     } else {
       ASSERT_EQ(array_type, kExternalUnsignedIntArray);
       // The test is different for unsigned int values. Since we need
-      // the Smi-encoded result to be treated as unsigned, we can't
+      // the value to be in the range of a positive smi, we can't
       // handle either of the top two bits being set in the value.
-      __ test(eax, Immediate(0xC0000000));
+      __ test(ecx, Immediate(0xC0000000));
       __ j(not_zero, &box_int);
     }
 
-    __ shl(eax, kSmiTagSize);
+    __ mov(eax, ecx);
+    __ SmiTag(eax);
     __ ret(0);
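
The range checks here decide whether a loaded 32-bit element needs boxing: a
signed value fits in the 31-bit smi payload iff it lies in [-2^30, 2^30), and
"cmp ecx, 0xC0000000" computes exactly that +2^30 shift (modulo 2^32), whose
sign flag answers the question; an unsigned value must additionally fit in a
positive smi. A checkable sketch:

    #include <cassert>
    #include <cstdint>

    // v + 2^30 is non-negative exactly when v is in [-2^30, 2^30).
    static bool SignedFitsInSmi(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }

    // Unsigned values must fit in a *positive* smi, so neither of the top
    // two bits may be set.
    static bool UnsignedFitsInSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }

    int main() {
      assert(SignedFitsInSmi(0x3FFFFFFF) && SignedFitsInSmi(-0x40000000));
      assert(!SignedFitsInSmi(0x40000000) && !SignedFitsInSmi(INT32_MIN));
      assert(UnsignedFitsInSmi(0x3FFFFFFFu) && !UnsignedFitsInSmi(0x40000000u));
      return 0;
    }
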
 
     __ bind(&box_int);
@@ -545,34 +529,37 @@
     // Allocate a HeapNumber for the int and perform int-to-double
     // conversion.
     if (array_type == kExternalIntArray) {
-      __ push(eax);
+      __ push(ecx);
       __ fild_s(Operand(esp, 0));
-      __ pop(eax);
+      __ pop(ecx);
     } else {
       ASSERT(array_type == kExternalUnsignedIntArray);
       // Need to zero-extend the value.
       // There's no fild variant for unsigned values, so zero-extend
       // to a 64-bit int manually.
       __ push(Immediate(0));
-      __ push(eax);
+      __ push(ecx);
       __ fild_d(Operand(esp, 0));
-      __ pop(eax);
-      __ pop(eax);
+      __ pop(ecx);
+      __ pop(ecx);
     }
     // FP(0): value
-    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
     // Set the value.
+    __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
   } else if (array_type == kExternalFloatArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
     // Set the value.
+    __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
   } else {
-    __ shl(eax, kSmiTagSize);
+    __ mov(eax, ecx);
+    __ SmiTag(eax);
     __ ret(0);
   }
 
@@ -583,10 +570,51 @@
   __ fincstp();
   // Fall through to slow case.
 
-  // Slow case: Load name and receiver from stack and jump to runtime.
+  // Slow case: jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
-  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label slow;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
+  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ j(not_zero, &slow, not_taken);
+
+  // Everything is fine, call runtime.
+  __ pop(ecx);
+  __ push(edx);  // receiver
+  __ push(eax);  // key
+  __ push(ecx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(
+        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm);
 }
 
 
@@ -645,7 +673,7 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
-  Generate(masm, ExternalReference(Runtime::kSetProperty));
+  GenerateRuntimeSetProperty(masm);
 
   // Check whether the elements is a pixel array.
   // eax: value
@@ -918,7 +946,7 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
-  Generate(masm, ExternalReference(Runtime::kSetProperty));
+  GenerateRuntimeSetProperty(masm);
 }
 
 
@@ -1001,7 +1029,7 @@
 
   // Search dictionary - put result in register edi.
   __ mov(edi, edx);
-  GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+  GenerateDictionaryLoad(masm, miss, edx, ecx, eax, edi, ebx, CHECK_DICTIONARY);
 
   // Check that the result is not a smi.
   __ test(edi, Immediate(kSmiTagMask));
@@ -1150,13 +1178,11 @@
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
 
-  __ mov(eax, Operand(esp, kPointerSize));
-
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                          NOT_IN_LOOP,
@@ -1164,20 +1190,18 @@
   StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
 
   // Cache miss: Jump to runtime.
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss, probe, global;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-
   // Check that the receiver isn't a smi.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &miss, not_taken);
@@ -1202,8 +1226,16 @@
 
   // Search the dictionary placing the result in eax.
   __ bind(&probe);
-  GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
+  GenerateDictionaryLoad(masm,
+                         &miss,
+                         eax,
+                         ecx,
+                         edx,
+                         edi,
+                         ebx,
+                         CHECK_DICTIONARY);
+  GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx);
+  __ mov(eax, edi);
   __ ret(0);
 
   // Global object access: Check access rights.
@@ -1213,37 +1245,24 @@
 
   // Cache miss: Jump to runtime.
   __ bind(&miss);
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
 
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- esp[0] : return address
-  //  -- esp[4] : receiver
-  // -----------------------------------
-
-  __ mov(eax, Operand(esp, kPointerSize));
   __ pop(ebx);
   __ push(eax);  // receiver
   __ push(ecx);  // name
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
 }
 
 
@@ -1347,31 +1366,35 @@
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
 
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
-  // ----------- S t a t e -------------
-  //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
-  // -----------------------------------
-
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ pop(ebx);
-  __ push(ecx);  // receiver
+  __ push(edx);  // receiver
   __ push(eax);  // name
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  __ pop(ebx);
+  __ push(edx);  // receiver
+  __ push(eax);  // name
+  __ push(ebx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
 }
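
All miss and runtime handlers in this file share the same tail-call shape: pop
the return address, push the arguments in order, push the return address back
on top, and jump, so the runtime entry sees an ordinary call frame and returns
directly to the IC's caller. Schematically, for the two-argument case above:

    // Sketch of the stack shuffle (top of stack first):
    //   before:  [ ret ]                   key in eax, receiver in edx
    //   after:   [ ret | key | receiver ]
    // TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1) then jumps to the
    // runtime entry, which pops both arguments and returns through ret.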
 
 
@@ -1393,26 +1416,6 @@
 }
 
 
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : transition map
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // transition map
-  __ push(eax);  // value
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
-
-
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -1432,10 +1435,61 @@
 }
 
 
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, i.e. anything with
+  // elements of FixedArray type), but it is currently restricted to JSArray.
+  // The value must be a number; only smis are accepted here, as they are the
+  // most common case.
+
+  Label miss;
+
+  Register receiver = edx;
+  Register value = eax;
+  Register scratch = ebx;
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the elements are a FixedArray.
+  __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the value is a smi.
+  __ test(value, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ pop(scratch);
+  __ push(receiver);
+  __ push(value);
+  __ push(scratch);  // return address
+
+  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
+}
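
Taken together, the guards above carve out a narrow fast path; everything else
falls through to GenerateMiss. A C++ sketch of the condition, using the usual
heap-object predicates (illustrative, not part of the patch):

    // Fast-path condition for the inlined store to array.length (sketch).
    static inline bool CanStoreArrayLengthInline(Object* receiver,
                                                 Object* value) {
      if (!receiver->IsJSArray() || !value->IsSmi()) return false;
      // External and pixel arrays have elements of other instance types.
      return JSArray::cast(receiver)->elements()->IsFixedArray();
    }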
+
+
 // Defined in ic.cc.
 Object* KeyedStoreIC_Miss(Arguments args);
 
-void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- esp[0] : return address
@@ -1450,28 +1504,26 @@
   __ push(ecx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(f, 3, 1);
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
 }
 
 
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
-  //  -- ecx    : transition map
   //  -- esp[0] : return address
   //  -- esp[4] : key
   //  -- esp[8] : receiver
   // -----------------------------------
 
-  __ pop(ebx);
+  __ pop(ecx);
   __ push(Operand(esp, 1 * kPointerSize));
-  __ push(ecx);
+  __ push(Operand(esp, 1 * kPointerSize));
   __ push(eax);
-  __ push(ebx);
+  __ push(ecx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
 }
 
 #undef __
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 19a380b..5ae3fe2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -41,7 +41,6 @@
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
       code_object_(Heap::undefined_value()) {
@@ -308,6 +307,13 @@
     }
   }
 }
+
+void MacroAssembler::DebugBreak() {
+  Set(eax, Immediate(0));
+  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
+  CEntryStub ces(1);
+  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
 #endif
 
 void MacroAssembler::Set(Register dst, const Immediate& x) {
@@ -338,6 +344,19 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    test(obj, Immediate(kSmiTagMask));
+    j(zero, fail);
+  }
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  j(not_equal, fail);
+}
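
A hypothetical use of the new helper, collapsing the usual smi-check plus
map-compare pair into one call (the register, map, and label here are
illustrative only):

    Label not_a_heap_number;
    // Jumps to not_a_heap_number if edx is a smi or has the wrong map.
    __ CheckMap(edx, Factory::heap_number_map(), &not_a_heap_number, false);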
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
@@ -364,6 +383,17 @@
 }
 
 
+void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+  Label ok;
+  test(object, Immediate(kSmiTagMask));
+  j(zero, &ok);
+  cmp(FieldOperand(object, HeapObject::kMapOffset),
+      Factory::heap_number_map());
+  Assert(equal, msg);
+  bind(&ok);
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, Operand(esp));
@@ -396,12 +426,8 @@
 
   // Reserve room for entry stack pointer and push the debug marker.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
-  push(Immediate(0));  // saved entry sp, patched before call
-  if (mode == ExitFrame::MODE_DEBUG) {
-    push(Immediate(0));
-  } else {
-    push(Immediate(CodeObject()));
-  }
+  push(Immediate(0));  // Saved entry sp, patched before call.
+  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -538,6 +564,7 @@
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
+                                   int save_at_depth,
                                    Label* miss) {
   // Make sure there's no overlap between scratch and the other
   // registers.
@@ -545,7 +572,11 @@
 
   // Keep track of the current object in register reg.
   Register reg = object_reg;
-  int depth = 1;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    mov(Operand(esp, kPointerSize), object_reg);
+  }
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
@@ -577,7 +608,6 @@
       // to it in the code. Load it from the map.
       reg = holder_reg;  // from now the object is in holder_reg
       mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
-
     } else {
       // Check the map of the current object.
       cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -595,6 +625,10 @@
       mov(reg, Handle<JSObject>(prototype));
     }
 
+    if (save_at_depth == depth) {
+      mov(Operand(esp, kPointerSize), reg);
+    }
+
     // Go to the next object in the prototype chain.
     object = prototype;
   }
@@ -605,7 +639,7 @@
   j(not_equal, miss, not_taken);
 
   // Log the check depth.
-  LOG(IntEvent("check-maps-depth", depth));
+  LOG(IntEvent("check-maps-depth", depth + 1));
 
   // Perform security check for access to the global object and return
   // the holder register.
@@ -1122,6 +1156,16 @@
 }
 
 
+void MacroAssembler::CallExternalReference(ExternalReference ref,
+                                           int num_arguments) {
+  mov(eax, Immediate(num_arguments));
+  mov(ebx, Immediate(ref));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
 Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
                                        int num_arguments) {
   if (f->nargs >= 0 && f->nargs != num_arguments) {
@@ -1342,10 +1386,22 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+  // Get the function and set up the context.
+  mov(edi, Immediate(Handle<JSFunction>(function)));
+  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1353,55 +1409,22 @@
   // arguments match the expected number of arguments. Fake a
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
-  InvokeCode(Handle<Code>(code), expected, expected,
-             RelocInfo::CODE_TARGET, flag);
-
-  const char* name = Builtins::GetName(id);
-  int argc = Builtins::GetArgumentsCount(id);
-
-  if (!resolved) {
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
-    unresolved_.Add(entry);
-  }
+  GetBuiltinEntry(edx, id);
+  InvokeCode(Operand(edx), expected, expected, flag);
 }
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
-  const char* name = Builtins::GetName(id);
-  int argc = Builtins::GetArgumentsCount(id);
-
-  mov(Operand(target), Immediate(code));
-  if (!resolved) {
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
-    unresolved_.Add(entry);
-  }
-  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
-}
-
-
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
-  // Move the builtin function into the temporary function slot by
-  // reading it from the builtins object. NOTE: We should be able to
-  // reduce this to two instructions by putting the function table in
-  // the global object instead of the "builtins" object and by using a
-  // real register for the function.
-  mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(edi, FieldOperand(edi, GlobalObject::kBuiltinsOffset));
   int builtins_offset =
       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  mov(edi, FieldOperand(edx, builtins_offset));
-
-  return Builtins::GetCode(id, resolved);
+  mov(edi, FieldOperand(edi, builtins_offset));
+  // Load the code entry point from the function into the target register.
+  mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
 }
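
The rewritten GetBuiltinEntry no longer bakes a code handle into the
instruction stream; it chases the pointers at run time instead. A C++ sketch
of the equivalent lookup, with accessor names assumed from the surrounding
codebase rather than taken from this patch:

    // Sketch: target <- entry point of the builtin's current code object.
    JSFunction* function = JSFunction::cast(
        Top::context()->global()->builtins()->javascript_builtin(id));
    // instruction_start() is the code object plus Code::kHeaderSize minus
    // the heap-object tag, matching the add() above.
    Address entry = function->shared()->code()->instruction_start();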
 
 
@@ -1546,6 +1569,20 @@
 }
 
 
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure) {
+  if (!scratch.is(instance_type)) {
+    mov(scratch, instance_type);
+  }
+  and_(scratch,
+       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+  cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
+  j(not_equal, failure);
+}
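
The helper is a single mask-and-compare over the instance-type bits; the same
predicate in plain C++, built from the string-shape constants referenced in
the code (sketch only):

    static inline bool IsSequentialAsciiInstanceType(uint32_t instance_type) {
      const uint32_t kMask = kIsNotStringMask |
                             kStringRepresentationMask |
                             kStringEncodingMask;
      // A string that is both sequential and ASCII-encoded.
      return (instance_type & kMask) ==
             (kStringTag | kSeqStringTag | kAsciiStringTag);
    }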
+
+
 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
                                                          Register object2,
                                                          Register scratch1,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index cc24560..69dc54c 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -69,6 +69,7 @@
   void CopyRegistersFromStackToMemory(Register base,
                                       Register scratch,
                                       RegList regs);
+  void DebugBreak();
 #endif
 
   // ---------------------------------------------------------------------------
@@ -123,6 +124,10 @@
                       const ParameterCount& actual,
                       InvokeFlag flag);
 
+  void InvokeFunction(JSFunction* function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
   // Invoke the specified builtin JavaScript function.
   void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
@@ -141,6 +146,14 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if the map of an object is equal to a specified map and branch
+  // to a label if not. The smi check is skipped if is_heap_object is true
+  // (i.e. the object is already known to be a heap object).
+  void CheckMap(Register obj,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
   // contains the instance_type. The registers map and instance_type can be the
@@ -163,6 +176,9 @@
     sar(reg, kSmiTagSize);
   }
 
+  // Abort execution if argument is not a number. Used in debug code.
+  void AbortIfNotNumber(Register object, const char* msg);
+
   // ---------------------------------------------------------------------------
   // Exception handling
 
@@ -185,9 +201,14 @@
  // clobbered if it is the same as the holder register. The function
   // returns a register containing the holder - either object_reg or
   // holder_reg.
+  // The function can optionally (when save_at_depth !=
+  // kInvalidProtoDepth) save the object at the given depth by moving
+  // it to [esp + kPointerSize].
   Register CheckMaps(JSObject* object, Register object_reg,
                      JSObject* holder, Register holder_reg,
-                     Register scratch, Label* miss);
+                     Register scratch,
+                     int save_at_depth,
+                     Label* miss);
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -339,6 +360,9 @@
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
+  // Convenience function: call an external reference.
+  void CallExternalReference(ExternalReference ref, int num_arguments);
+
   // Convenience function: Same as above, but takes the fid instead.
   Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
 
@@ -376,13 +400,6 @@
 
   void Move(Register target, Handle<Object> value);
 
-  struct Unresolved {
-    int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
-    const char* name;
-  };
-  List<Unresolved>* unresolved() { return &unresolved_; }
-
   Handle<Object> CodeObject() { return code_object_; }
 
 
@@ -418,6 +435,13 @@
   // ---------------------------------------------------------------------------
   // String utilities.
 
+  // Check whether the instance type represents a flat ASCII string. Jump to
+  // the label if not. If the instance type can be scratched, specify the same
+  // register for both instance type and scratch.
+  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+                                              Register scratch,
+                                              Label* on_not_flat_ascii_string);
+
   // Checks if both objects are sequential ASCII strings, and jumps to label
   // if either is not.
   void JumpIfNotBothSequentialAsciiStrings(Register object1,
@@ -427,11 +451,10 @@
                                            Label *on_not_flat_ascii_strings);
 
  private:
-  List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the
-                                // code object on installation.
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -441,18 +464,6 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Prepares for a call or jump to a builtin by doing two things:
-  // 1. Emits code that fetches the builtin's function object from the context
-  //    at runtime, and puts it in the register rdi.
-  // 2. Fetches the builtin's code object, and returns it in a handle, at
-  //    compile time, so that later code can emit instructions to jump or call
-  //    the builtin directly.  If the code object has not yet been created, it
-  //    returns the builtin code object for IllegalFunction, and sets the
-  //    output parameter "resolved" to false.  Code that uses the return value
-  //    should then add the address and the builtin name to the list of fixups
-  //    called unresolved_, which is fixed up by the bootstrapper.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 7acf81c..5729d9d 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -152,22 +152,6 @@
 }
 
 
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
-  __ push(name);
-  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
-  ASSERT(!Heap::InNewSpace(interceptor));
-  __ mov(receiver, Immediate(Handle<Object>(interceptor)));
-  __ push(receiver);
-  __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
-}
-
-
 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                        int index,
                                                        Register prototype) {
@@ -226,30 +210,32 @@
 
 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                             Register receiver,
-                                            Register scratch,
+                                            Register scratch1,
+                                            Register scratch2,
                                             Label* miss) {
-  Label load_length, check_wrapper;
+  Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
 
   // Load length from the string and convert to a smi.
-  __ bind(&load_length);
   __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
   __ SmiTag(eax);
   __ ret(0);
 
   // Check if the object is a JSValue wrapper.
   __ bind(&check_wrapper);
-  __ cmp(scratch, JS_VALUE_TYPE);
+  __ cmp(scratch1, JS_VALUE_TYPE);
   __ j(not_equal, miss, not_taken);
 
   // Check if the wrapped value is a string and load the length
   // directly if it is.
-  __ mov(receiver, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, receiver, scratch, miss, miss);
-  __ jmp(&load_length);
+  __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+  __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+  __ SmiTag(eax);
+  __ ret(0);
 }
 
 
@@ -285,20 +271,31 @@
 }
 
 
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Register name,
+                                     JSObject* holder_obj) {
+  __ push(receiver);
+  __ push(holder);
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!Heap::InNewSpace(interceptor));
+  __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+  __ push(receiver);
+  __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
+
+
 static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
                                                    Register receiver,
                                                    Register holder,
                                                    Register name,
                                                    JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
-  __ mov(eax, Immediate(5));
-  __ mov(ebx, Immediate(ref));
-
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+  __ CallExternalReference(
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)),
+        5);
 }
 
 
@@ -326,7 +323,7 @@
       stub_compiler->CheckPrototypes(object, receiver, holder,
                                      scratch1, scratch2, name, miss);
 
-  if (lookup->IsValid() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     compiler->CompileCacheable(masm,
                                stub_compiler,
                                receiver,
@@ -362,7 +359,7 @@
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
-    AccessorInfo* callback = 0;
+    AccessorInfo* callback = NULL;
     bool optimize = false;
     // So far the most popular follow-ups for interceptor loads are FIELD
     // and CALLBACKS, so inline only them; other cases may be added
@@ -479,39 +476,362 @@
 };
 
 
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+  explicit CallOptimization(LookupResult* lookup)
+    : constant_function_(NULL),
+      is_simple_api_call_(false),
+      expected_receiver_type_(NULL),
+      api_call_info_(NULL) {
+    if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+
+    // We only optimize constant function calls.
+    if (lookup->type() != CONSTANT_FUNCTION) return;
+
+    Initialize(lookup->GetConstantFunction());
+  }
+
+  explicit CallOptimization(JSFunction* function) {
+    Initialize(function);
+  }
+
+  bool is_constant_call() const {
+    return constant_function_ != NULL;
+  }
+
+  JSFunction* constant_function() const {
+    ASSERT(constant_function_ != NULL);
+    return constant_function_;
+  }
+
+  bool is_simple_api_call() const {
+    return is_simple_api_call_;
+  }
+
+  FunctionTemplateInfo* expected_receiver_type() const {
+    ASSERT(is_simple_api_call_);
+    return expected_receiver_type_;
+  }
+
+  CallHandlerInfo* api_call_info() const {
+    ASSERT(is_simple_api_call_);
+    return api_call_info_;
+  }
+
+  // Returns the depth in the prototype chain between the two arguments at
+  // which an object with the expected receiver type is found.
+  int GetPrototypeDepthOfExpectedType(JSObject* object,
+                                      JSObject* holder) const {
+    ASSERT(is_simple_api_call_);
+    if (expected_receiver_type_ == NULL) return 0;
+    int depth = 0;
+    while (object != holder) {
+      if (object->IsInstanceOf(expected_receiver_type_)) return depth;
+      object = JSObject::cast(object->GetPrototype());
+      ++depth;
+    }
+    if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+    return kInvalidProtoDepth;
+  }
+
+ private:
+  void Initialize(JSFunction* function) {
+    constant_function_ = NULL;
+    is_simple_api_call_ = false;
+    expected_receiver_type_ = NULL;
+    api_call_info_ = NULL;
+
+    if (!function->is_compiled()) return;
+
+    constant_function_ = function;
+    AnalyzePossibleApiFunction(function);
+  }
+
+  // Determines whether the given function can be called using the
+  // fast api call builtin.
+  void AnalyzePossibleApiFunction(JSFunction* function) {
+    SharedFunctionInfo* sfi = function->shared();
+    if (sfi->function_data()->IsUndefined()) return;
+    FunctionTemplateInfo* info =
+        FunctionTemplateInfo::cast(sfi->function_data());
+
+    // Require a C++ callback.
+    if (info->call_code()->IsUndefined()) return;
+    api_call_info_ = CallHandlerInfo::cast(info->call_code());
+
+    // Accept signatures that either have no restrictions at all or
+    // only have restrictions on the receiver.
+    if (!info->signature()->IsUndefined()) {
+      SignatureInfo* signature = SignatureInfo::cast(info->signature());
+      if (!signature->args()->IsUndefined()) return;
+      if (!signature->receiver()->IsUndefined()) {
+        expected_receiver_type_ =
+            FunctionTemplateInfo::cast(signature->receiver());
+      }
+    }
+
+    is_simple_api_call_ = true;
+  }
+
+  JSFunction* constant_function_;
+  bool is_simple_api_call_;
+  FunctionTemplateInfo* expected_receiver_type_;
+  CallHandlerInfo* api_call_info_;
+};
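
In outline, the call compilers below consult the class like this (a sketch
assembled from the call sites later in this patch; object, holder, and lookup
stand for whatever the compiler is specializing on):

    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      int depth = kInvalidProtoDepth;
      if (optimization.is_simple_api_call()) {
        depth = optimization.GetPrototypeDepthOfExpectedType(object, holder);
      }
      if (depth != kInvalidProtoDepth) {
        // The receiver type-checks at some depth: emit the fast API call.
      } else {
        // Fall back to invoking the constant function directly.
      }
    }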
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : last argument in the internal frame of the caller
+  // -----------------------------------
+  __ pop(scratch);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- esp[0]  : return address
+  //  -- esp[4]  : last fast api call extra argument
+  //  -- ...
+  //  -- esp[16] : first fast api call extra argument
+  //  -- esp[20] : last argument in the internal frame
+  // -----------------------------------
+  __ pop(scratch);
+  __ add(Operand(esp), Immediate(kPointerSize * 4));
+  __ push(scratch);
+}
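
Reserve and Free must bracket every fast-api code path: the four zero smis are
placeholders that CheckPrototypes (the type-checked object) and
GenerateFastApiCall (call data, callback, function) later overwrite, and any
miss taken while they are live has to pop them before rejoining the ordinary
miss handler. The pattern, as the call compilers below use it (sketch; the
miss_label target is the caller's):

    ReserveSpaceForFastApiCall(masm, scratch);
    Label miss_cleanup;
    // ... checks that may jump to &miss_cleanup while the slots are live ...
    GenerateFastApiCall(masm, optimization, argc);  // fills the four slots
    __ bind(&miss_cleanup);
    FreeSpaceForFastApiCall(masm, scratch);  // drop the placeholder words
    __ jmp(miss_label);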
+
+
+// Generates a call to the FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
+  // ----------- S t a t e -------------
+  //  -- esp[0]              : return address
+  //  -- esp[4]              : object passing the type check
+  //                           (last fast api call extra argument,
+  //                            set by CheckPrototypes)
+  //  -- esp[8]              : api call data
+  //  -- esp[12]             : api callback
+  //  -- esp[16]             : api function
+  //                           (first fast api call extra argument)
+  //  -- esp[20]             : last argument
+  //  -- ...
+  //  -- esp[(argc + 5) * 4] : first argument
+  //  -- esp[(argc + 6) * 4] : receiver
+  // -----------------------------------
+
+  // Get the function and set up the context.
+  JSFunction* function = optimization.constant_function();
+  __ mov(edi, Immediate(Handle<JSFunction>(function)));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  __ mov(Operand(esp, 4 * kPointerSize), edi);
+  bool info_loaded = false;
+  Object* callback = optimization.api_call_info()->callback();
+  if (Heap::InNewSpace(callback)) {
+    info_loaded = true;
+    __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kCallbackOffset));
+    __ mov(Operand(esp, 3 * kPointerSize), ebx);
+  } else {
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(Handle<Object>(callback)));
+  }
+  Object* call_data = optimization.api_call_info()->data();
+  if (Heap::InNewSpace(call_data)) {
+    if (!info_loaded) {
+      __ mov(ecx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    }
+    __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
+    __ mov(Operand(esp, 2 * kPointerSize), ebx);
+  } else {
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(Handle<Object>(call_data)));
+  }
+
+  // Set the number of arguments.
+  __ mov(eax, Immediate(argc + 4));
+
+  // Jump to the fast api call builtin (tail call).
+  Handle<Code> code = Handle<Code>(
+      Builtins::builtin(Builtins::FastHandleApiCall));
+  ParameterCount expected(0);
+  __ InvokeCode(code, expected, expected,
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
-  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
-      : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+  CallInterceptorCompiler(StubCompiler* stub_compiler,
+                          const ParameterCount& arguments,
+                          Register name)
+      : stub_compiler_(stub_compiler),
+        arguments_(arguments),
+        name_(name) {}
 
+  void Compile(MacroAssembler* masm,
+               JSObject* object,
+               JSObject* holder,
+               String* name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ test(receiver, Immediate(kSmiTagMask));
+    __ j(zero, miss, not_taken);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call() &&
+        !Top::CanHaveSpecialFunctions(holder)) {
+      CompileCacheable(masm,
+                       object,
+                       receiver,
+                       scratch1,
+                       scratch2,
+                       holder,
+                       lookup,
+                       name,
+                       optimization,
+                       miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     name,
+                     holder,
+                     miss);
+    }
+  }
+
+ private:
   void CompileCacheable(MacroAssembler* masm,
-                        StubCompiler* stub_compiler,
+                        JSObject* object,
                         Register receiver,
-                        Register holder,
                         Register scratch1,
                         Register scratch2,
                         JSObject* holder_obj,
                         LookupResult* lookup,
                         String* name,
+                        const CallOptimization& optimization,
                         Label* miss_label) {
-    JSFunction* function = 0;
-    bool optimize = false;
-    // So far the most popular case for failed interceptor is
-    // CONSTANT_FUNCTION sitting below.
-    if (lookup->type() == CONSTANT_FUNCTION) {
-      function = lookup->GetConstantFunction();
-      // JSArray holder is a special case for call constant function
-      // (see the corresponding code).
-      if (function->is_compiled() && !holder_obj->IsJSArray()) {
-        optimize = true;
+    ASSERT(optimization.is_constant_call());
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
+                                                              lookup->holder());
       }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
     }
 
-    if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
-      return;
+    __ IncrementCounter(&Counters::call_const_interceptor, 1);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+      ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+                                        scratch1, scratch2, name,
+                                        depth1, miss);
+
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+
+    // Generate code for the failed interceptor case.
+
+    // Check that the lookup is still valid.
+    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+                                    lookup->holder(),
+                                    scratch1, scratch2, name,
+                                    depth2, miss);
+
+    if (can_do_fast_api_call) {
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+    } else {
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION);
+    }
+
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm, scratch1);
+      __ jmp(miss_label);
+    }
+
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm, scratch1);
+    }
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      JSObject* object,
+                      Register receiver,
+                      Register scratch1,
+                      Register scratch2,
+                      String* name,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+                                        scratch1, scratch2, name,
+                                        miss_label);
+
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             holder_obj);
+
+    __ CallExternalReference(
+          ExternalReference(
+              IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+          5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
+  }
+
+  void LoadWithInterceptor(MacroAssembler* masm,
+                           Register receiver,
+                           Register holder,
+                           JSObject* holder_obj,
+                           Label* interceptor_succeeded) {
     __ EnterInternalFrame();
     __ push(holder);  // Save the holder.
     __ push(name_);  // Save the name.
@@ -527,66 +847,11 @@
     __ LeaveInternalFrame();
 
     __ cmp(eax, Factory::no_interceptor_result_sentinel());
-    Label invoke;
-    __ j(not_equal, &invoke);
-
-    stub_compiler->CheckPrototypes(holder_obj, receiver,
-                                   lookup->holder(), scratch1,
-                                   scratch2,
-                                   name,
-                                   miss_label);
-    if (lookup->holder()->IsGlobalObject()) {
-      __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
-      __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
-      __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
-    }
-
-    ASSERT(function->is_compiled());
-    // Get the function and setup the context.
-    __ mov(edi, Immediate(Handle<JSFunction>(function)));
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-    // Jump to the cached code (tail call).
-    Handle<Code> code(function->code());
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    __ InvokeCode(code, expected, arguments_,
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
-
-    __ bind(&invoke);
+    __ j(not_equal, interceptor_succeeded);
   }
 
-  void CompileRegular(MacroAssembler* masm,
-                      Register receiver,
-                      Register holder,
-                      Register scratch,
-                      JSObject* holder_obj,
-                      Label* miss_label) {
-    __ EnterInternalFrame();
-    // Save the name_ register across the call.
-    __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             holder_obj);
-
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
-    __ mov(eax, Immediate(5));
-    __ mov(ebx, Immediate(ref));
-
-    CEntryStub stub(1);
-    __ CallStub(&stub);
-
-    // Restore the name_ register.
-    __ pop(name_);
-    __ LeaveInternalFrame();
-  }
-
- private:
+  StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
-  int argc_;
   Register name_;
 };
 
@@ -605,8 +870,9 @@
 }
 
 
+// Both name_reg and receiver_reg are preserved on jumps to miss_label, but
+// they may be destroyed if the store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
                                       JSObject* object,
                                       int index,
                                       Map* transition,
@@ -636,9 +902,13 @@
   if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
-    __ mov(ecx, Immediate(Handle<Map>(transition)));
-    Handle<Code> ic(Builtins::builtin(storage_extend));
-    __ jmp(ic, RelocInfo::CODE_TARGET);
+    __ pop(scratch);  // Return address.
+    __ push(receiver_reg);
+    __ push(Immediate(Handle<Map>(transition)));
+    __ push(eax);
+    __ push(scratch);
+    __ TailCallRuntime(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
     return;
   }
 
@@ -691,10 +961,12 @@
                                        Register holder_reg,
                                        Register scratch,
                                        String* name,
+                                       int push_at_depth,
                                        Label* miss) {
   // Check that the maps haven't changed.
   Register result =
-      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
+                        push_at_depth, miss);
 
   // If we've skipped any global objects, it's not enough to verify
   // that their maps haven't changed.
@@ -716,7 +988,7 @@
     object = JSObject::cast(object->GetPrototype());
   }
 
-  // Return the register containin the holder.
+  // Return the register containing the holder.
   return result;
 }
 
@@ -887,7 +1159,7 @@
 }
 
 
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
                                            JSObject* holder,
                                            int index,
                                            String* name) {
@@ -909,9 +1181,7 @@
   __ j(zero, &miss, not_taken);
 
   // Do the right check and compute the holder register.
-  Register reg =
-      CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, name, &miss);
+  Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
 
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
 
@@ -969,15 +1239,31 @@
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
+  CallOptimization optimization(function);
+  int depth = kInvalidProtoDepth;
+
   switch (check) {
     case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(&Counters::call_const, 1);
+
+      if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+        depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+      }
+
+      if (depth != kInvalidProtoDepth) {
+        __ IncrementCounter(&Counters::call_const_fast_api, 1);
+        ReserveSpaceForFastApiCall(masm(), eax);
+      }
+
       // Check that the maps haven't changed.
       CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, name, &miss);
+                      ebx, eax, name, depth, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
       if (object->IsGlobalObject()) {
+        ASSERT(depth == kInvalidProtoDepth);
         __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
         __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
       }
@@ -1062,19 +1348,17 @@
       UNREACHABLE();
   }
 
-  // Get the function and setup the context.
-  __ mov(edi, Immediate(Handle<JSFunction>(function)));
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  // Jump to the cached code (tail call).
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (depth != kInvalidProtoDepth) {
+    GenerateFastApiCall(masm(), optimization, argc);
+  } else {
+    __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
+  if (depth != kInvalidProtoDepth) {
+    FreeSpaceForFastApiCall(masm(), eax);
+  }
   Handle<Code> ic = ComputeCallMiss(arguments().immediate());
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
@@ -1087,7 +1371,7 @@
 }
 
 
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
@@ -1108,18 +1392,16 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(arguments(), ecx);
-  CompileLoadInterceptor(&compiler,
-                         this,
-                         masm(),
-                         JSObject::cast(object),
-                         holder,
-                         name,
-                         &lookup,
-                         edx,
-                         ebx,
-                         edi,
-                         &miss);
+  CallInterceptorCompiler compiler(this, arguments(), ecx);
+  compiler.Compile(masm(),
+                   object,
+                   holder,
+                   name,
+                   &lookup,
+                   edx,
+                   ebx,
+                   edi,
+                   &miss);
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1249,7 +1531,6 @@
 
   // Generate store field code.  Trashes the name register.
   GenerateStoreField(masm(),
-                     Builtins::StoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
@@ -1423,15 +1704,14 @@
   __ j(not_equal, &miss, not_taken);
 
   // Get the object from the stack.
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
 
   // Generate store field code.  Trashes the name register.
   GenerateStoreField(masm(),
-                     Builtins::KeyedStoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
-                     ebx, ecx, edx,
+                     edx, ecx, ebx,
                      &miss);
 
   // Handle store cache miss.
@@ -1451,13 +1731,12 @@
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
   GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1472,13 +1751,12 @@
                                               JSObject* holder,
                                               AccessorInfo* callback) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
                                       callback, name, &miss, &failure);
@@ -1497,13 +1775,12 @@
                                               Object* value,
                                               String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
   GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1517,16 +1794,15 @@
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
-  __ mov(eax, Operand(esp, kPointerSize));
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
   GenerateLoadInterceptor(receiver,
@@ -1553,15 +1829,12 @@
                                             String* name,
                                             bool is_dont_delete) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  // Get the receiver from the stack.
-  __ mov(eax, Operand(esp, kPointerSize));
-
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
@@ -1574,19 +1847,20 @@
   CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
 
   // Get the value from the cell.
-  __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
+  __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
-    __ cmp(eax, Factory::the_hole_value());
+    __ cmp(ebx, Factory::the_hole_value());
     __ j(equal, &miss, not_taken);
   } else if (FLAG_debug_code) {
-    __ cmp(eax, Factory::the_hole_value());
+    __ cmp(ebx, Factory::the_hole_value());
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
   __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ mov(eax, ebx);
   __ ret(0);
 
   __ bind(&miss);
@@ -1603,21 +1877,19 @@
                                                 JSObject* holder,
                                                 int index) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_field, 1);
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
+  GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss);
 
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_field, 1);
@@ -1633,14 +1905,12 @@
                                                    JSObject* holder,
                                                    AccessorInfo* callback) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_callback, 1);
 
   // Check that the name has not changed.
@@ -1648,7 +1918,7 @@
   __ j(not_equal, &miss, not_taken);
 
   Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+  bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx,
                                       callback, name, &miss, &failure);
   if (!success) return failure;
 
@@ -1666,21 +1936,19 @@
                                                    JSObject* holder,
                                                    Object* value) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
+  GenerateLoadConstant(receiver, holder, edx, ebx, ecx,
                        value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1695,14 +1963,12 @@
                                                       JSObject* holder,
                                                       String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
 
   // Check that the name has not changed.
@@ -1714,9 +1980,9 @@
   GenerateLoadInterceptor(receiver,
                           holder,
                           &lookup,
-                          ecx,
-                          eax,
                           edx,
+                          eax,
+                          ecx,
                           ebx,
                           name,
                           &miss);
@@ -1733,21 +1999,19 @@
 
 Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_array_length, 1);
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadArrayLength(masm(), ecx, edx, &miss);
+  GenerateLoadArrayLength(masm(), edx, ecx, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_array_length, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1759,21 +2023,19 @@
 
 Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_string_length, 1);
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadStringLength(masm(), ecx, edx, &miss);
+  GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1785,21 +2047,19 @@
 
 Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadFunctionPrototype(masm(), ecx, edx, ebx, &miss);
+  GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
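
Note on the hunks above: every keyed-load stub changes in the same way: the key now arrives in eax and the receiver in edx, the two explicit stack loads disappear, and ecx is demoted to a scratch register. A minimal sketch of that shared shape (illustrative only; the GetCode call and the FIELD property type are assumptions about the surrounding sources):

    Object* KeyedLoadStubCompiler::CompileLoadFieldSketch(String* name,
                                                          JSObject* receiver,
                                                          JSObject* holder,
                                                          int index) {
      // State on entry: eax holds the key, edx holds the receiver.
      Label miss;
      __ IncrementCounter(&Counters::keyed_load_field, 1);
      // Stay monomorphic in the name: miss if the key changed.
      __ cmp(Operand(eax), Immediate(Handle<String>(name)));
      __ j(not_equal, &miss, not_taken);
      // Fast path; ebx and ecx are free scratch registers now.
      GenerateLoadField(receiver, holder, edx, ebx, ecx, index, name, &miss);
      __ bind(&miss);
      __ DecrementCounter(&Counters::keyed_load_field, 1);
      GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
      return GetCode(FIELD, name);  // assumed return convention
    }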
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 9267507..d248539 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -45,7 +45,7 @@
     : elements_(parameter_count() + local_count() + kPreallocatedElements),
       stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
   for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement());
+    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
   }
   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
     register_locations_[i] = kIllegalIndex;
@@ -173,10 +173,12 @@
   for (int i = 0; i < element_count(); i++) {
     FrameElement element = elements_[i];
 
+    // All number type information is reset to unknown for a mergable frame
+    // because of incoming back edges.
     if (element.is_constant() || element.is_copy()) {
       if (element.is_synced()) {
         // Just spill.
-        elements_[i] = FrameElement::MemoryElement();
+        elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
       } else {
         // Allocate to a register.
         FrameElement backing_element;  // Invalid if not a copy.
@@ -187,7 +189,8 @@
         ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED);
+                                          FrameElement::NOT_SYNCED,
+                                          NumberInfo::kUnknown);
         Use(fresh.reg(), i);
 
         // Emit a move.
@@ -220,6 +223,7 @@
       // The copy flag is not relied on before the end of this loop,
       // including when registers are spilled.
       elements_[i].clear_copied();
+      elements_[i].set_number_info(NumberInfo::kUnknown);
     }
   }
 }
@@ -607,10 +611,14 @@
   // Set the new backing element.
   if (elements_[new_backing_index].is_synced()) {
     elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+        FrameElement::RegisterElement(backing_reg,
+                                      FrameElement::SYNCED,
+                                      original.number_info());
   } else {
     elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+        FrameElement::RegisterElement(backing_reg,
+                                      FrameElement::NOT_SYNCED,
+                                      original.number_info());
   }
   // Update the other copies.
   for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -641,7 +649,8 @@
       ASSERT(fresh.is_valid());
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED);
+                                        FrameElement::NOT_SYNCED,
+                                        original.number_info());
       Use(fresh.reg(), element_count());
       elements_.Add(new_element);
       __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
@@ -853,6 +862,17 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+  PrepareForCall(0, 0);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ DebugBreak();
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+}
+#endif
+
+
 Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    int arg_count) {
@@ -877,22 +897,53 @@
 
 Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
   // Name and receiver are on the top of the frame.  The IC expects
-  // name in ecx and receiver on the stack.  It does not drop the
-  // receiver.
+  // name in ecx and receiver in eax.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   Result name = Pop();
-  PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
-  name.ToRegister(ecx);
+  Result receiver = Pop();
+  PrepareForCall(0, 0);  // No stack arguments.
+  // Move results to the right registers:
+  if (name.is_register() && name.reg().is(eax)) {
+    if (receiver.is_register() && receiver.reg().is(ecx)) {
+      // Wrong registers.
+      __ xchg(eax, ecx);
+    } else {
+      // Register ecx is free for name, which frees eax for receiver.
+      name.ToRegister(ecx);
+      receiver.ToRegister(eax);
+    }
+  } else {
+    // Register eax is free for receiver, which frees ecx for name.
+    receiver.ToRegister(eax);
+    name.ToRegister(ecx);
+  }
   name.Unuse();
+  receiver.Unuse();
   return RawCallCodeObject(ic, mode);
 }
 
 
 Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame.  The IC expects them on
-  // the stack.  It does not drop them.
+  // Key and receiver are on top of the frame. Put them in eax and edx.
+  Result key = Pop();
+  Result receiver = Pop();
+  PrepareForCall(0, 0);
+
+  if (!key.is_register() || !key.reg().is(edx)) {
+    // Register edx is available for receiver.
+    receiver.ToRegister(edx);
+    key.ToRegister(eax);
+  } else if (!receiver.is_register() || !receiver.reg().is(eax)) {
+    // Register eax is available for key.
+    key.ToRegister(eax);
+    receiver.ToRegister(edx);
+  } else {
+    __ xchg(edx, eax);
+  }
+  key.Unuse();
+  receiver.Unuse();
+
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
   return RawCallCodeObject(ic, mode);
 }
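
The register shuffles in CallLoadIC and CallKeyedLoadIC above must cope with the classic hazard of fixed-register calling conventions: each value may already occupy the other value's target register, and a naive pair of ToRegister calls would then clobber one of them. A hedged sketch of the pattern in isolation (MoveToFixedPair is a hypothetical helper, not part of this patch; __ is the usual masm shorthand):

    // Move a into ra and b into rb without clobbering either value.
    static void MoveToFixedPair(Result* a, Register ra,
                                Result* b, Register rb) {
      if (a->is_register() && a->reg().is(rb)) {
        if (b->is_register() && b->reg().is(ra)) {
          __ xchg(ra, rb);    // Each sits in the other's target: swap.
        } else {
          a->ToRegister(ra);  // This vacates rb, so b can move in next.
          b->ToRegister(rb);
        }
      } else {
        b->ToRegister(rb);    // a does not occupy rb, so fill rb first;
        a->ToRegister(ra);    // ra was vacated if b had been parked there.
      }
      a->Unuse();
      b->Unuse();
    }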
 
@@ -947,7 +998,6 @@
   // expects value in eax and key and receiver on the stack.  It does
   // not drop the key and receiver.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  // TODO(1222589): Make the IC grab the values from the stack.
   Result value = Pop();
   PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
   value.ToRegister(eax);
@@ -1025,6 +1075,14 @@
   int index = element_count();
   ASSERT(element.is_valid());
 
+  // Get number type information of the result.
+  NumberInfo::Type info;
+  if (!element.is_copy()) {
+    info = element.number_info();
+  } else {
+    info = elements_[element.index()].number_info();
+  }
+
   bool pop_needed = (stack_pointer_ == index);
   if (pop_needed) {
     stack_pointer_--;
@@ -1032,6 +1090,7 @@
       Result temp = cgen()->allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ pop(temp.reg());
+      temp.set_number_info(info);
       return temp;
     }
 
@@ -1059,14 +1118,16 @@
     ASSERT(temp.is_valid());
     Use(temp.reg(), index);
     FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+        FrameElement::RegisterElement(temp.reg(),
+                                      FrameElement::SYNCED,
+                                      element.number_info());
     // Preserve the copy flag on the element.
     if (element.is_copied()) new_element.set_copied();
     elements_[index] = new_element;
     __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
-    return Result(temp.reg());
+    return Result(temp.reg(), info);
   } else if (element.is_register()) {
-    return Result(element.reg());
+    return Result(element.reg(), info);
   } else {
     ASSERT(element.is_constant());
     return Result(element.handle());
@@ -1090,25 +1151,25 @@
 }
 
 
-void VirtualFrame::EmitPush(Register reg) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(reg);
 }
 
 
-void VirtualFrame::EmitPush(Operand operand) {
+void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(operand);
 }
 
 
-void VirtualFrame::EmitPush(Immediate immediate) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(immediate);
 }
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index d6d55d1..b078ba0 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -28,6 +28,7 @@
 #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
 #define V8_IA32_VIRTUAL_FRAME_IA32_H_
 
+#include "number-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
 
@@ -82,7 +83,8 @@
   MacroAssembler* masm() { return cgen()->masm(); }
 
   // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index);
+  FrameElement CopyElementAt(
+      int index, NumberInfo::Type info = NumberInfo::kUninitialized);
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -324,12 +326,16 @@
   Result CallRuntime(Runtime::Function* f, int arg_count);
   Result CallRuntime(Runtime::FunctionId id, int arg_count);
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void DebugBreak();
+#endif
+
   // Invoke builtin given the number of arguments it expects on (and
   // removes from) the stack.
   Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
 
   // Call load IC.  Name and receiver are found on top of the frame.
-  // Receiver is not dropped.
+  // Both are dropped.
   Result CallLoadIC(RelocInfo::Mode mode);
 
   // Call keyed load IC.  Key and receiver are found on top of the
@@ -381,12 +387,15 @@
 
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
-  void EmitPush(Register reg);
-  void EmitPush(Operand operand);
-  void EmitPush(Immediate immediate);
+  void EmitPush(Register reg,
+                NumberInfo::Type info = NumberInfo::kUnknown);
+  void EmitPush(Operand operand,
+                NumberInfo::Type info = NumberInfo::kUnknown);
+  void EmitPush(Immediate immediate,
+                NumberInfo::Type info = NumberInfo::kUnknown);
 
   // Push an element on the virtual frame.
-  void Push(Register reg);
+  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
   void Push(Handle<Object> value);
   void Push(Smi* value) {
     Push(Handle<Object> (value));
@@ -398,7 +407,7 @@
     // This assert will trigger if you try to push the same value twice.
     ASSERT(result->is_valid());
     if (result->is_register()) {
-      Push(result->reg());
+      Push(result->reg(), result->number_info());
     } else {
       ASSERT(result->is_constant());
       Push(result->handle());
diff --git a/src/ic.cc b/src/ic.cc
index 8fc9ddb..31ece04 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -330,10 +330,11 @@
   while (true) {
     object->Lookup(name, lookup);
     // Besides normal conditions (property not found or it's not
-    // an interceptor), bail out of lookup is not cacheable: we won't
+    // an interceptor), bail out if lookup is not cacheable: we won't
     // be able to IC it anyway and regular lookup should work fine.
-    if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
-        !lookup->IsCacheable()) {
+    if (!lookup->IsFound() ||
+        (lookup->type() != INTERCEPTOR) ||
+        !lookup->IsCacheable()) {
       return;
     }
 
@@ -343,7 +344,7 @@
     }
 
     holder->LocalLookupRealNamedProperty(name, lookup);
-    if (lookup->IsValid()) {
+    if (lookup->IsProperty()) {
       ASSERT(lookup->type() != INTERCEPTOR);
       return;
     }
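
The renamings in this hunk and the ones below split the old catch-all LookupResult::IsValid() into finer predicates. Read off the call sites in this patch, the assumed split is (declarations only; the real ones live on LookupResult):

    bool IsFound();                 // the lookup hit anything at all
    bool IsProperty();              // hit a real property; what load ICs want
    bool IsPropertyOrTransition();  // also accepts a map transition; used on
                                    // the store IC paths further down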
@@ -422,7 +423,7 @@
   LookupResult lookup;
   LookupForRead(*object, *name, &lookup);
 
-  if (!lookup.IsValid()) {
+  if (!lookup.IsProperty()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
     if (IsContextual(object)) {
@@ -455,7 +456,7 @@
 
   if (result->IsJSFunction()) {
     // Check if there is an optimized (builtin) version of the function.
-    // Ignored this will degrade performance for Array.prototype.{push,pop}.
+    // Ignoring this will degrade performance for some Array functions.
     // Please note we only return the optimized function iff
     // the JSObject has FastElements.
     if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
@@ -493,7 +494,7 @@
                           Handle<String> name) {
   ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
@@ -642,8 +643,8 @@
   LookupResult lookup;
   LookupForRead(*object, *name, &lookup);
 
-  // If lookup is invalid, check if we need to throw an exception.
-  if (!lookup.IsValid()) {
+  // If we did not find a property, check if we need to throw an exception.
+  if (!lookup.IsProperty()) {
     if (FLAG_strict || IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
@@ -653,7 +654,7 @@
   bool can_be_inlined =
       FLAG_use_ic &&
       state == PREMONOMORPHIC &&
-      lookup.IsValid() &&
+      lookup.IsProperty() &&
       lookup.IsLoaded() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
@@ -681,7 +682,7 @@
   }
 
   PropertyAttributes attr;
-  if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+  if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
     // Get the property.
     Object* result = object->GetProperty(*object, &lookup, *name, &attr);
     if (result->IsFailure()) return result;
@@ -704,7 +705,7 @@
                           Handle<String> name) {
   ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
   // Loading properties from values is not common, so don't try to
   // deal with non-JS objects here.
@@ -857,8 +858,8 @@
     LookupResult lookup;
     LookupForRead(*object, *name, &lookup);
 
-    // If lookup is invalid, check if we need to throw an exception.
-    if (!lookup.IsValid()) {
+    // If we did not find a property, check if we need to throw an exception.
+    if (!lookup.IsProperty()) {
       if (FLAG_strict || IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
@@ -869,7 +870,7 @@
     }
 
     PropertyAttributes attr;
-    if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
+    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
       // Get the property.
       Object* result = object->GetProperty(*object, &lookup, *name, &attr);
       if (result->IsFailure()) return result;
@@ -896,6 +897,8 @@
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
       if (receiver->HasExternalArrayElements()) {
         stub = external_array_stub(receiver->GetElementsKind());
+      } else if (receiver->HasIndexedInterceptor()) {
+        stub = indexed_interceptor_stub();
       }
     }
     set_target(stub);
@@ -919,7 +922,7 @@
                                Handle<Object> object, Handle<String> name) {
   ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
   if (!object->IsJSObject()) return;
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -992,7 +995,7 @@
 
 static bool StoreICableLookup(LookupResult* lookup) {
   // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
+  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
 
   // If the property is read-only, we leave the IC in its current
   // state.
@@ -1046,6 +1049,20 @@
     return *value;
   }
 
+
+  // Use specialized code for setting the length of arrays.
+  if (receiver->IsJSArray() &&
+      name->Equals(Heap::length_symbol()) &&
+      receiver->AllowsSetElementsLength()) {
+#ifdef DEBUG
+    if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
+#endif
+    Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
+    set_target(target);
+    StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+    return receiver->SetProperty(*name, *value, NONE);
+  }
+
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
     LookupResult lookup;
@@ -1212,7 +1229,7 @@
   if (receiver->IsJSGlobalProxy()) return;
 
   // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
+  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
 
   // If the property is read-only, we leave the IC in its current
   // state.
@@ -1320,16 +1337,6 @@
 }
 
 
-void LoadIC::GenerateInitialize(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
 // Used from ic_<arch>.cc
 Object* KeyedLoadIC_Miss(Arguments args) {
   NoHandleAllocation na;
@@ -1340,16 +1347,6 @@
 }
 
 
-void KeyedLoadIC::GenerateInitialize(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
 // Used from ic_<arch>.cc.
 Object* StoreIC_Miss(Arguments args) {
   NoHandleAllocation na;
@@ -1361,6 +1358,17 @@
 }
 
 
+Object* StoreIC_ArrayLength(Arguments args) {
+  NoHandleAllocation nha;
+
+  ASSERT(args.length() == 2);
+  JSObject* receiver = JSObject::cast(args[0]);
+  Object* len = args[1];
+
+  return receiver->SetElementsLength(len);
+}
+
+
 // Extend storage is called in a store inline cache when
 // it is necessary to extend the properties array of a
 // JSObject.
@@ -1406,16 +1414,6 @@
 }
 
 
-void KeyedStoreIC::GenerateInitialize(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
diff --git a/src/ic.h b/src/ic.h
index a991e30..d545989 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -45,6 +45,7 @@
   ICU(KeyedLoadIC_Miss)                               \
   ICU(CallIC_Miss)                                    \
   ICU(StoreIC_Miss)                                   \
+  ICU(StoreIC_ArrayLength)                            \
   ICU(SharedStoreIC_ExtendStorage)                    \
   ICU(KeyedStoreIC_Miss)                              \
   /* Utilities for IC stubs. */                       \
@@ -53,6 +54,7 @@
   ICU(LoadPropertyWithInterceptorOnly)                \
   ICU(LoadPropertyWithInterceptorForLoad)             \
   ICU(LoadPropertyWithInterceptorForCall)             \
+  ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)
 
 //
@@ -223,8 +225,10 @@
   Object* Load(State state, Handle<Object> object, Handle<String> name);
 
   // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm);
-  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
@@ -240,8 +244,6 @@
   static const int kOffsetToLoadInstruction;
 
  private:
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
   // Update the inline cache and the global stub cache based on the
   // lookup result.
   void UpdateCaches(LookupResult* lookup,
@@ -279,8 +281,11 @@
 
   // Code generator routines.
   static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm);
-  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
 
@@ -290,6 +295,7 @@
   // for all other types.
   static void GenerateExternalArray(MacroAssembler* masm,
                                     ExternalArrayType array_type);
+  static void GenerateIndexedInterceptor(MacroAssembler* masm);
 
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);
@@ -302,8 +308,6 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
@@ -328,6 +332,10 @@
   }
   static Code* external_array_stub(JSObject::ElementsKind elements_kind);
 
+  static Code* indexed_interceptor_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
+  }
+
   static void Clear(Address address, Code* target);
 
   // Support for patching the map that is checked in an inlined
@@ -351,7 +359,7 @@
   static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
-  static void GenerateExtendStorage(MacroAssembler* masm);
+  static void GenerateArrayLength(MacroAssembler* masm);
 
  private:
   // Update the inline cache and the global stub cache based on the
@@ -384,10 +392,10 @@
                 Handle<Object> value);
 
   // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateRuntimeSetProperty(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);
-  static void GenerateExtendStorage(MacroAssembler* masm);
 
   // Generators for external array types. See objects.h.
   // These are similar to the generic IC; they optimize the case of
@@ -403,8 +411,6 @@
   static void RestoreInlinedVersion(Address address);
 
  private:
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
-
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
diff --git a/src/json-delay.js b/src/json-delay.js
index 7788f51..3e42d36 100644
--- a/src/json-delay.js
+++ b/src/json-delay.js
@@ -80,8 +80,9 @@
 };
 
 function QuoteSingleJSONCharacter(c) {
-  if (c in characterQuoteCache)
+  if (c in characterQuoteCache) {
     return characterQuoteCache[c];
+  }
   var charCode = c.charCodeAt(0);
   var result;
   if (charCode < 16) result = '\\u000';
@@ -101,15 +102,17 @@
 function StackContains(stack, val) {
   var length = stack.length;
   for (var i = 0; i < length; i++) {
-    if (stack[i] === val)
+    if (stack[i] === val) {
       return true;
+    }
   }
   return false;
 }
 
 function SerializeArray(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value))
+  if (StackContains(stack, value)) {
     throw MakeTypeError('circular_structure', []);
+  }
   stack.push(value);
   var stepback = indent;
   indent += gap;
@@ -117,9 +120,10 @@
   var len = value.length;
   for (var i = 0; i < len; i++) {
     var strP = JSONSerialize($String(i), value, replacer, stack,
-        indent, gap);
-    if (IS_UNDEFINED(strP))
+                             indent, gap);
+    if (IS_UNDEFINED(strP)) {
       strP = "null";
+    }
     partial.push(strP);
   }
   var final;
@@ -137,8 +141,9 @@
 }
 
 function SerializeObject(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value))
+  if (StackContains(stack, value)) {
     throw MakeTypeError('circular_structure', []);
+  }
   stack.push(value);
   var stepback = indent;
   indent += gap;
@@ -188,17 +193,21 @@
   var value = holder[key];
   if (IS_OBJECT(value) && value) {
     var toJSON = value.toJSON;
-    if (IS_FUNCTION(toJSON))
+    if (IS_FUNCTION(toJSON)) {
       value = toJSON.call(value, key);
+    }
   }
-  if (IS_FUNCTION(replacer))
+  if (IS_FUNCTION(replacer)) {
     value = replacer.call(holder, key, value);
+  }
   // Unwrap value if necessary
   if (IS_OBJECT(value)) {
     if (IS_NUMBER_WRAPPER(value)) {
       value = $Number(value);
     } else if (IS_STRING_WRAPPER(value)) {
       value = $String(value);
+    } else if (IS_BOOLEAN_WRAPPER(value)) {
+      value = $Boolean(value);
     }
   }
   switch (typeof value) {
@@ -232,12 +241,17 @@
   }
   var gap;
   if (IS_NUMBER(space)) {
-    space = $Math.min(space, 100);
+    space = $Math.min(space, 10);
     gap = "";
-    for (var i = 0; i < space; i++)
+    for (var i = 0; i < space; i++) {
       gap += " ";
+    }
   } else if (IS_STRING(space)) {
-    gap = space;
+    if (space.length > 10) {
+      gap = space.substring(0, 10);
+    } else {
+      gap = space;
+    }
   } else {
     gap = "";
   }
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
index 1f0676d..dcd615e 100644
--- a/src/jump-target-inl.h
+++ b/src/jump-target-inl.h
@@ -42,6 +42,9 @@
   } else if (target->is_copy()) {
     entry_frame_->elements_[target->index()].set_copied();
   }
+  if (direction_ == BIDIRECTIONAL) {
+    entry_frame_->elements_[index].set_number_info(NumberInfo::kUnknown);
+  }
 }
 
 } }  // namespace v8::internal
diff --git a/src/jump-target.cc b/src/jump-target.cc
index 3782f92..66764e6 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -101,6 +101,17 @@
         if (element == NULL || !element->is_valid()) break;
 
         element = element->Combine(&reaching_frames_[j]->elements_[i]);
+
+        FrameElement* other = &reaching_frames_[j]->elements_[i];
+        if (element != NULL && !element->is_copy()) {
+          ASSERT(other != NULL);
+          ASSERT(!other->is_copy());
+          // We overwrite the number information of one of the incoming frames.
+          // This is safe because we only use the frame for emitting merge code.
+          // The number information of incoming frames is not used anymore.
+          element->set_number_info(NumberInfo::Combine(element->number_info(),
+                                                       other->number_info()));
+        }
       }
       elements[i] = element;
     }
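
Folding number information across all reaching frames behaves like the join of a small type lattice. A minimal sketch of the semantics this code appears to rely on (an assumption: kUninitialized acts as the identity element and any disagreement widens to kUnknown; the real NumberInfo::Combine lives in number-info.h):

    static NumberInfo::Type CombineSketch(NumberInfo::Type a,
                                          NumberInfo::Type b) {
      if (a == NumberInfo::kUninitialized) return b;  // identity element
      if (b == NumberInfo::kUninitialized) return a;
      if (a == b) return a;                           // agreement survives
      return NumberInfo::kUnknown;                    // disagreement widens
    }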
@@ -117,6 +128,7 @@
     // elements as copied exactly when they have a copy.  Undetermined
     // elements are initially recorded as if in memory.
     if (target != NULL) {
+      ASSERT(!target->is_copy());  // These initial elements are never copies.
       entry_frame_->elements_[index] = *target;
       InitializeEntryElement(index, target);
     }
@@ -125,7 +137,8 @@
   for (; index < length; index++) {
     FrameElement* target = elements[index];
     if (target == NULL) {
-      entry_frame_->elements_.Add(FrameElement::MemoryElement());
+      entry_frame_->elements_.Add(
+          FrameElement::MemoryElement(NumberInfo::kUninitialized));
     } else {
       entry_frame_->elements_.Add(*target);
       InitializeEntryElement(index, target);
@@ -142,9 +155,20 @@
       RegisterFile candidate_registers;
       int best_count = kMinInt;
       int best_reg_num = RegisterAllocator::kInvalidRegister;
+      NumberInfo::Type info = NumberInfo::kUninitialized;
 
       for (int j = 0; j < reaching_frames_.length(); j++) {
         FrameElement element = reaching_frames_[j]->elements_[i];
+        if (direction_ == BIDIRECTIONAL) {
+          info = NumberInfo::kUnknown;
+        } else if (!element.is_copy()) {
+          info = NumberInfo::Combine(info, element.number_info());
+        } else {
+          // New elements will not be copies, so get number information from
+          // the backing element in the reaching frame.
+          info = NumberInfo::Combine(
+              info,
+              reaching_frames_[j]->elements_[element.index()].number_info());
+        }
         is_synced = is_synced && element.is_synced();
         if (element.is_register() && !entry_frame_->is_used(element.reg())) {
           // Count the register occurrence and remember it if better
@@ -158,11 +182,17 @@
         }
       }
 
+      // We must have number type information now (not for copied elements).
+      ASSERT(entry_frame_->elements_[i].is_copy() ||
+             info != NumberInfo::kUninitialized);
+
       // If the value is synced on all frames, put it in memory.  This
       // costs nothing at the merge code but will incur a
       // memory-to-register move when the value is needed later.
       if (is_synced) {
         // Already recorded as a memory element.
+        // Set combined number info.
+        entry_frame_->elements_[i].set_number_info(info);
         continue;
       }
 
@@ -183,14 +213,28 @@
         bool is_copied = entry_frame_->elements_[i].is_copied();
         Register reg = RegisterAllocator::ToRegister(best_reg_num);
         entry_frame_->elements_[i] =
-            FrameElement::RegisterElement(reg,
-                                          FrameElement::NOT_SYNCED);
+            FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
+                                          NumberInfo::kUninitialized);
         if (is_copied) entry_frame_->elements_[i].set_copied();
         entry_frame_->set_register_location(reg, i);
       }
+      // Set combined number info.
+      entry_frame_->elements_[i].set_number_info(info);
     }
   }
 
+  // With incoming backward edges, assert we forget all number information.
+#ifdef DEBUG
+  if (direction_ == BIDIRECTIONAL) {
+    for (int i = 0; i < length; ++i) {
+      if (!entry_frame_->elements_[i].is_copy()) {
+        ASSERT(entry_frame_->elements_[i].number_info() ==
+               NumberInfo::kUnknown);
+      }
+    }
+  }
+#endif
+
   // The stack pointer is at the highest synced element or the base of
   // the expression stack.
   int stack_pointer = length - 1;
diff --git a/src/liveedit.cc b/src/liveedit.cc
new file mode 100644
index 0000000..c50e007
--- /dev/null
+++ b/src/liveedit.cc
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "liveedit.h"
+#include "compiler.h"
+#include "oprofile-agent.h"
+#include "scopes.h"
+#include "global-handles.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+class FunctionInfoListener {
+ public:
+  void FunctionStarted(FunctionLiteral* fun) {
+    // Implementation follows.
+  }
+
+  void FunctionDone() {
+    // Implementation follows.
+  }
+
+  void FunctionScope(Scope* scope) {
+    // Implementation follows.
+  }
+
+  void FunctionCode(Handle<Code> function_code) {
+    // Implementation follows.
+  }
+};
+
+static FunctionInfoListener* active_function_info_listener = NULL;
+
+LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+  if (active_function_info_listener != NULL) {
+    active_function_info_listener->FunctionStarted(fun);
+  }
+}
+
+
+LiveEditFunctionTracker::~LiveEditFunctionTracker() {
+  if (active_function_info_listener != NULL) {
+    active_function_info_listener->FunctionDone();
+  }
+}
+
+
+void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+  if (active_function_info_listener != NULL) {
+    active_function_info_listener->FunctionCode(code);
+  }
+}
+
+
+void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
+  if (active_function_info_listener != NULL) {
+    active_function_info_listener->FunctionScope(scope);
+  }
+}
+
+
+bool LiveEditFunctionTracker::IsActive() {
+  return active_function_info_listener != NULL;
+}
+
+} }  // namespace v8::internal
diff --git a/src/liveedit.h b/src/liveedit.h
new file mode 100644
index 0000000..73aa7d3
--- /dev/null
+++ b/src/liveedit.h
@@ -0,0 +1,78 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEEDIT_H_
+#define V8_LIVEEDIT_H_
+
+
+// Live Edit feature implementation.
+// The user should be able to change a script on an already running VM. This
+// feature matches hot-swap features in other frameworks.
+//
+// The basic use-case is when a user spots a mistake in a function body in
+// the debugger and wishes to change the algorithm without a restart.
+//
+// A single change is always a simple replacement (in pseudo-code):
+//   script.source[positions, positions+length] = new_string;
+// The implementation first determines which function's body includes the
+// change area. Then both the old and new versions of the script are fully
+// compiled in order to analyze whether the function changed its outer scope
+// expectations (or its number of parameters). If it didn't, the function's
+// code is patched with the newly compiled code. If it did change, the
+// enclosing function gets patched instead. All inner functions are left
+// untouched, whatever happened to them in the new script version; however,
+// the new version of the code will instantiate newly compiled functions.
+
+
+#include "compiler.h"
+
+namespace v8 {
+namespace internal {
+
+// This class collects some specific information on the structure of functions
+// in a particular script. It is called from the compiler all the time, but
+// only actually records data while a LiveEdit operation is in progress; at
+// any other time this class is very cheap.
+//
+// The primary interest of the Tracker is to record function scope structures
+// in order to analyze whether function code may be safely patched (with new
+// code successfully reading existing data from function scopes). The Tracker
+// also collects compiled function codes.
+class LiveEditFunctionTracker {
+ public:
+  explicit LiveEditFunctionTracker(FunctionLiteral* fun);
+  ~LiveEditFunctionTracker();
+  void RecordFunctionCode(Handle<Code> code);
+  void RecordFunctionScope(Scope* scope);
+
+  static bool IsActive();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_LIVEEDIT_H_
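
The tracker is an RAII hook: it is constructed for the duration of one function's compilation, artifacts are reported through it, and when no LiveEdit operation is active every call degenerates to a NULL check. A hedged usage sketch (CompileFunctionSketch and GenerateCode are hypothetical; only the tracker interface comes from this patch):

    void CompileFunctionSketch(FunctionLiteral* fun) {
      LiveEditFunctionTracker tracker(fun);    // FunctionStarted if active
      Handle<Code> code = GenerateCode(fun);   // hypothetical helper
      tracker.RecordFunctionScope(fun->scope());
      tracker.RecordFunctionCode(code);
    }                                          // FunctionDone on scope exit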
diff --git a/src/log-utils.cc b/src/log-utils.cc
index fd95604..722e0fc 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -351,15 +351,6 @@
 }
 
 
-void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
-  const int len = StrLength(str);
-  const int written = Log::Write(str, len);
-  if (written != len && write_failure_handler != NULL) {
-    write_failure_handler();
-  }
-}
-
-
 // Formatting string for back references to the whole line. E.g. "#2" means
 // "the second line above".
 const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
diff --git a/src/log-utils.h b/src/log-utils.h
index 3e25b0e..b769e90 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -268,9 +268,6 @@
   // Write the log message to the log file currently opened.
   void WriteToLogFile();
 
-  // Write a null-terminated string to to the log file currently opened.
-  void WriteCStringToLogFile(const char* str);
-
   // A handler that is called when Log::Write fails.
   typedef void (*WriteFailureHandler)();
 
diff --git a/src/log.cc b/src/log.cc
index 5de7429..a3fef73 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -330,6 +330,8 @@
 const char** Logger::log_events_ = NULL;
 CompressionHelper* Logger::compression_helper_ = NULL;
 bool Logger::is_logging_ = false;
+int Logger::cpu_profiler_nesting_ = 0;
+int Logger::heap_profiler_nesting_ = 0;
 
 #define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
 const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@@ -368,15 +370,6 @@
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
-void Logger::Preamble(const char* content) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::IsEnabled() || !FLAG_log_code) return;
-  LogMessageBuilder msg;
-  msg.WriteCStringToLogFile(content);
-#endif
-}
-
-
 void Logger::StringEvent(const char* name, const char* value) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log) UncheckedStringEvent(name, value);
@@ -1164,53 +1157,61 @@
 }
 
 
-void Logger::PauseProfiler(int flags) {
+void Logger::PauseProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
-  const int active_modules = GetActiveProfilerModules();
-  const int modules_to_disable = active_modules & flags;
-  if (modules_to_disable == PROFILER_MODULE_NONE) return;
-
-  if (modules_to_disable & PROFILER_MODULE_CPU) {
-    profiler_->pause();
-    if (FLAG_prof_lazy) {
-      if (!FLAG_sliding_state_window) ticker_->Stop();
-      FLAG_log_code = false;
-      // Must be the same message as Log::kDynamicBufferSeal.
-      LOG(UncheckedStringEvent("profiler", "pause"));
+  if (flags & PROFILER_MODULE_CPU) {
+    // It is OK to have negative nesting.
+    if (--cpu_profiler_nesting_ == 0) {
+      profiler_->pause();
+      if (FLAG_prof_lazy) {
+        if (!FLAG_sliding_state_window) ticker_->Stop();
+        FLAG_log_code = false;
+        // Must be the same message as Log::kDynamicBufferSeal.
+        LOG(UncheckedStringEvent("profiler", "pause"));
+      }
     }
   }
-  if (modules_to_disable &
+  if (flags &
       (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    FLAG_log_gc = false;
+    if (--heap_profiler_nesting_ == 0) {
+      FLAG_log_gc = false;
+    }
   }
-  // Turn off logging if no active modules remain.
-  if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+  if (tag != 0) {
+    IntEvent("close-tag", tag);
+  }
+  if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
     is_logging_ = false;
   }
 }
 
 
-void Logger::ResumeProfiler(int flags) {
+void Logger::ResumeProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
-  const int modules_to_enable = ~GetActiveProfilerModules() & flags;
-  if (modules_to_enable != PROFILER_MODULE_NONE) {
-    is_logging_ = true;
+  if (tag != 0) {
+    IntEvent("open-tag", tag);
   }
-  if (modules_to_enable & PROFILER_MODULE_CPU) {
-    if (FLAG_prof_lazy) {
-      profiler_->Engage();
-      LOG(UncheckedStringEvent("profiler", "resume"));
-      FLAG_log_code = true;
-      LogCompiledFunctions();
-      LogFunctionObjects();
-      LogAccessorCallbacks();
-      if (!FLAG_sliding_state_window) ticker_->Start();
+  if (flags & PROFILER_MODULE_CPU) {
+    if (cpu_profiler_nesting_++ == 0) {
+      is_logging_ = true;
+      if (FLAG_prof_lazy) {
+        profiler_->Engage();
+        LOG(UncheckedStringEvent("profiler", "resume"));
+        FLAG_log_code = true;
+        LogCompiledFunctions();
+        LogFunctionObjects();
+        LogAccessorCallbacks();
+        if (!FLAG_sliding_state_window) ticker_->Start();
+      }
+      profiler_->resume();
     }
-    profiler_->resume();
   }
-  if (modules_to_enable &
+  if (flags &
       (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    FLAG_log_gc = true;
+    if (heap_profiler_nesting_++ == 0) {
+      is_logging_ = true;
+      FLAG_log_gc = true;
+    }
   }
 }
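
PauseProfiler and ResumeProfiler now behave as per-module reference counts: only a resume that takes the count from 0 to 1 and a pause that takes it from 1 to 0 touch the underlying flags, so nested profiling sessions from independent clients compose, and pausing an already-paused module merely drives the count negative. A minimal model of the discipline (illustrative; the real counters are the static Logger members added above):

    class NestedSwitch {
     public:
      NestedSwitch() : nesting_(0) {}
      void Resume() { if (nesting_++ == 0) TurnOn(); }
      void Pause() { if (--nesting_ == 0) TurnOff(); }  // negative is OK
     private:
      void TurnOn();   // engage the profiler, enable logging
      void TurnOff();  // pause the profiler, disable logging
      int nesting_;
    };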
 
@@ -1219,7 +1220,7 @@
 // either from main or Profiler's thread.
 void Logger::StopLoggingAndProfiling() {
   Log::stop();
-  PauseProfiler(PROFILER_MODULE_CPU);
+  PauseProfiler(PROFILER_MODULE_CPU, 0);
 }
 
 
@@ -1261,7 +1262,9 @@
       case Code::FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
       case Code::STUB:
-        description = CodeStub::MajorName(code_object->major_key());
+        description = CodeStub::MajorName(code_object->major_key(), true);
+        if (description == NULL) {
+          description = "A stub from the snapshot";
+        }
         tag = Logger::STUB_TAG;
         break;
       case Code::BUILTIN:
@@ -1294,6 +1297,15 @@
 }
 
 
+void Logger::LogCodeObjects() {
+  AssertNoAllocation no_alloc;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsCode()) LogCodeObject(obj);
+  }
+}
+
+
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
diff --git a/src/log.h b/src/log.h
index 1f6e60e..eb8369c 100644
--- a/src/log.h
+++ b/src/log.h
@@ -161,12 +161,6 @@
   // Enable the computation of a sliding window of states.
   static void EnableSlidingStateWindow();
 
-  // Write a raw string to the log to be used as a preamble.
-  // No check is made that the 'preamble' is actually at the beginning
-  // of the log. The preample is used to write code events saved in the
-  // snapshot.
-  static void Preamble(const char* content);
-
   // Emits an event with a string value -> (name, value).
   static void StringEvent(const char* name, const char* value);
 
@@ -277,8 +271,8 @@
   // Pause/Resume collection of profiling data.
   // When data collection is paused, CPU Tick events are discarded until
   // data collection is Resumed.
-  static void PauseProfiler(int flags);
-  static void ResumeProfiler(int flags);
+  static void PauseProfiler(int flags, int tag);
+  static void ResumeProfiler(int flags, int tag);
   static int GetActiveProfilerModules();
 
   // If logging is performed into a memory buffer, allows to
@@ -292,7 +286,7 @@
   // Logs all accessor callbacks found in the heap.
   static void LogAccessorCallbacks();
   // Used for logging stubs found in the snapshot.
-  static void LogCodeObject(Object* code_object);
+  static void LogCodeObjects();
 
  private:
 
@@ -325,6 +319,9 @@
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
 
+  // Used for logging stubs found in the snapshot.
+  static void LogCodeObject(Object* code_object);
+
   // Emits a profiler tick event. Used by the profiler thread.
   static void TickEvent(TickSample* sample, bool overflow);
 
@@ -376,6 +373,8 @@
   friend class LoggerTestHelper;
 
   static bool is_logging_;
+  static int cpu_profiler_nesting_;
+  static int heap_profiler_nesting_;
 #else
   static bool is_logging() { return false; }
 #endif
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 0fe4328..81e5bf7 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -61,6 +61,8 @@
   RESULT_CONTAINS_TOP = 1 << 1
 };
 
+// Invalid depth in prototype chain.
+const int kInvalidProtoDepth = -1;
 
 #if V8_TARGET_ARCH_IA32
 #include "assembler.h"
@@ -86,6 +88,13 @@
 #endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/math.js b/src/math.js
index d804648..5745e61 100644
--- a/src/math.js
+++ b/src/math.js
@@ -233,7 +233,7 @@
                "SQRT2",
                1.4142135623730951,
                DONT_ENUM |  DONT_DELETE | READ_ONLY);
-  %TransformToFastProperties($Math);
+  %ToFastProperties($Math);
 
   // Setup non-enumerable functions of the Math object and
   // set their names.
diff --git a/src/messages.js b/src/messages.js
index df008c9..ca82afe 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -162,6 +162,8 @@
       value_and_accessor:           "Invalid property.  A property cannot both have accessors and be writable or have a value: %0",
       proto_object_or_null:         "Object prototype may only be an Object or null",
       property_desc_object:         "Property description must be an object: %0",
+      redefine_disallowed:          "Cannot redefine property: %0",
+      define_disallowed:            "Cannot define property, object is not extensible: %0",
       // RangeError
       invalid_array_length:         "Invalid array length",
       stack_overflow:               "Maximum call stack size exceeded",
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
new file mode 100644
index 0000000..2e63461
--- /dev/null
+++ b/src/mips/assembler-mips-inl.h
@@ -0,0 +1,215 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+#include "mips/assembler-mips.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Condition
+
+Condition NegateCondition(Condition cc) {
+  ASSERT(cc != cc_always);
+  return static_cast<Condition>(cc ^ 1);
+}
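
NegateCondition relies on the conditions being encoded in complementary pairs that differ only in the least significant bit, so xor-ing with 1 flips a condition to its inverse; cc_always has no complement, hence the ASSERT. For example (the numeric encodings here are assumptions):

    ASSERT(NegateCondition(eq) == ne);  // e.g. 2 ^ 1 == 3
    ASSERT(NegateCondition(ne) == eq);  //      3 ^ 1 == 2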
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+  rm_ = no_reg;
+  imm32_ = immediate;
+  rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(const char* s) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<int32_t>(s);
+  rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm32_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE;
+}
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+}
+
+bool Operand::is_reg() const {
+  return rm_.is_valid();
+}
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo
+
+void RelocInfo::apply(intptr_t delta) {
+  // On MIPS we do not use pc relative addressing, so we don't need to patch the
+  // code here.
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Handle<Object>(reinterpret_cast<Object**>(
+      Assembler::target_address_at(pc_)));
+}
+
+
+Object** RelocInfo::target_object_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object**>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  return reinterpret_cast<Address*>(pc_);
+}
+
+
+Address RelocInfo::call_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+#ifdef DEBUG
+  PrintF("%s - %d - %s : Checking for jal(r)\n",
+         __FILE__, __LINE__, __func__);
+#endif
+  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
+         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
+          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
new file mode 100644
index 0000000..4a91624
--- /dev/null
+++ b/src/mips/assembler-mips.cc
@@ -0,0 +1,1208 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#include "v8.h"
+#include "mips/assembler-mips-inl.h"
+#include "serialize.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+
+const Register no_reg = { -1 };
+
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
+
+const FPURegister no_creg = { -1 };
+
+const FPURegister f0 = { 0 };
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 };
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 };
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+int ToNumber(Register reg) {
+  ASSERT(reg.is_valid());
+  const int kNumbers[] = {
+    0,    // zero_reg
+    1,    // at
+    2,    // v0
+    3,    // v1
+    4,    // a0
+    5,    // a1
+    6,    // a2
+    7,    // a3
+    8,    // t0
+    9,    // t1
+    10,   // t2
+    11,   // t3
+    12,   // t4
+    13,   // t5
+    14,   // t6
+    15,   // t7
+    16,   // s0
+    17,   // s1
+    18,   // s2
+    19,   // s3
+    20,   // s4
+    21,   // s5
+    22,   // s6
+    23,   // s7
+    24,   // t8
+    25,   // t9
+    26,   // k0
+    27,   // k1
+    28,   // gp
+    29,   // sp
+    30,   // s8_fp
+    31,   // ra
+  };
+  return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {
+    zero_reg,
+    at,
+    v0, v1,
+    a0, a1, a2, a3,
+    t0, t1, t2, t3, t4, t5, t6, t7,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    s8_fp,
+    ra
+  };
+  return kRegisters[num];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask = 0;
+
+// Patch the code at the current address with the supplied instructions.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+  rm_ = no_reg;
+  // Verify that all Objects referred to by code are NOT in new space.
+  Object* obj = *handle;
+  ASSERT(!Heap::InNewSpace(obj));
+  if (obj->IsHeapObject()) {
+    imm32_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // No relocation needed.
+    imm32_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE;
+  }
+}
+
+MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+  offset_ = offset;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler.
+
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
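+
+// A single spare buffer is cached so that short-lived assemblers requesting
+// the minimal buffer size can reuse one allocation; see the constructor and
+// ~Assembler() below.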
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // Do our own buffer management.
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+
+  } else {
+    // Use externally provided buffer instead.
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Setup buffer pointers.
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // Setup code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned).
+const int kEndOfChain = -4;
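+
+// Conceptually, an unbound label used by several branches forms a chain
+// threaded through the 16-bit immediate fields of the branch instructions:
+//   branch#3 -> branch#2 -> branch#1 -> kEndOfChain
+// target_at() decodes one link, and bind_to() walks and patches the whole
+// chain once the target position is known.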
+
+bool Assembler::is_branch(Instr instr) {
+  uint32_t opcode   = ((instr & kOpcodeMask));
+  uint32_t rt_field = ((instr & kRtFieldMask));
+  uint32_t rs_field = ((instr & kRsFieldMask));
+  // Checks if the instruction is a branch.
+  return opcode == BEQ ||
+      opcode == BNE ||
+      opcode == BLEZ ||
+      opcode == BGTZ ||
+      opcode == BEQL ||
+      opcode == BNEL ||
+      opcode == BLEZL ||
+      opcode == BGTZL ||
+      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
+      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+}
+
+
+int Assembler::target_at(int32_t pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    return instr - (Code::kHeaderSize - kHeapObjectTag);
+  }
+  // Check we have a branch instruction.
+  ASSERT(is_branch(instr));
+  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+  // the compiler uses arithmetic shifts for signed integers.
+  int32_t imm18 = ((instr &
+                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
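+  // For example, imm16 == -2 (0xfffe) becomes 0xfffe0000 after the left
+  // shift, and the arithmetic right shift by 14 sign-extends it to -8,
+  // the branch offset in bytes.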
+
+  return pos + kBranchPCOffset + imm18;
+}
+
+
+void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+
+  ASSERT(is_branch(instr));
+  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+  ASSERT((imm18 & 3) == 0);
+
+  instr &= ~kImm16Mask;
+  int32_t imm16 = imm18 >> 2;
+  ASSERT(is_int16(imm16));
+
+  instr_at_put(pos, instr | (imm16 & kImm16Mask));
+}
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~kImm16Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        PrintF("%d\n", instr);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  while (L->is_linked()) {
+    int32_t fixup_pos = L->pos();
+    next(L);  // call next before overwriting link with target at fixup_pos
+    target_at_put(fixup_pos, pos);
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+  if (appendix->is_linked()) {
+    if (L->is_linked()) {
+      // Append appendix to L's list.
+      int fixup_pos;
+      int link = L->pos();
+      do {
+        fixup_pos = link;
+        link = target_at(fixup_pos);
+      } while (link > 0);
+      ASSERT(link == kEndOfChain);
+      target_at_put(fixup_pos, appendix->pos());
+    } else {
+      // L is empty, simply use appendix.
+      *L = *appendix;
+    }
+  }
+  appendix->Unuse();  // Appendix should not be used anymore.
+}
+
+
+void Assembler::bind(Label* L) {
+  ASSERT(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  ASSERT(L->is_linked());
+  int link = target_at(L->pos());
+  if (link > 0) {
+    L->link_to(link);
+  } else {
+    ASSERT(link == kEndOfChain);
+    L->Unuse();
+  }
+}
+
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in MIPS's 16 bits of immediate-offset instruction
+// space.  There is no guarantee that the relocated location can be similarly
+// encoded.
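+// For example, an ExternalReference operand must be emitted as a full
+// lui/ori pair while the serializer is enabled, even if its current value
+// fits in 16 bits, because the reference may be relocated at deserialization.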
+bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+    return Serializer::enabled();
+  } else if (rmode == RelocInfo::NONE) {
+    return false;
+  }
+  return true;
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 Register rs,
+                                 Register rt,
+                                 Register rd,
+                                 uint16_t sa,
+                                 SecondaryField func) {
+  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 FPURegister ft,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
+  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
+      | (fd.code() << 6) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 Register rt,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+  Instr instr = opcode | fmt | (rt.code() << kRtShift)
+      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+  emit(instr);
+}
+
+
+// Instructions with immediate value.
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  Register rt,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  SecondaryField SF,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  FPURegister ft,
+                                  int32_t j) {
+  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrJump(Opcode opcode,
+                              uint32_t address) {
+  ASSERT(is_uint26(address));
+  Instr instr = opcode | address;
+  emit(instr);
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(pc_offset());
+  }
+
+  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  return offset;
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+    } else {
+      target_pos = kEndOfChain;
+    }
+    L->link_to(at_offset);
+  }
+  // Write the label constant for bound and unbound labels alike.
+  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int16_t offset) {
+  beq(zero_reg, zero_reg, offset);
+}
+
+
+void Assembler::bal(int16_t offset) {
+  bgezal(zero_reg, offset);
+}
+
+
+void Assembler::beq(Register rs, Register rt, int16_t offset) {
+  GenInstrImmediate(BEQ, rs, rt, offset);
+}
+
+
+void Assembler::bgez(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+}
+
+
+void Assembler::bgezal(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+}
+
+
+void Assembler::bgtz(Register rs, int16_t offset) {
+  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::blez(Register rs, int16_t offset) {
+  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+}
+
+
+void Assembler::bltz(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+}
+
+
+void Assembler::bltzal(Register rs, int16_t offset) {
+  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+}
+
+
+void Assembler::bne(Register rs, Register rt, int16_t offset) {
+  GenInstrImmediate(BNE, rs, rt, offset);
+}
+
+
+void Assembler::j(int32_t target) {
+  ASSERT(is_uint28(target) && ((target & 3) == 0));
+  GenInstrJump(J, target >> 2);
+}
+
+
+void Assembler::jr(Register rs) {
+  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+}
+
+
+void Assembler::jal(int32_t target) {
+  ASSERT(is_uint28(target) && ((target & 3) == 0));
+  GenInstrJump(JAL, target >> 2);
+}
+
+
+void Assembler::jalr(Register rs, Register rd) {
+  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+}
+
+
+//-------Data-processing-instructions---------
+
+// Arithmetic.
+
+void Assembler::add(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
+}
+
+
+void Assembler::addu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
+}
+
+
+void Assembler::addi(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(ADDI, rs, rd, j);
+}
+
+
+void Assembler::addiu(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(ADDIU, rs, rd, j);
+}
+
+
+void Assembler::sub(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+}
+
+
+void Assembler::subu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
+}
+
+
+void Assembler::mul(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+}
+
+
+void Assembler::mult(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
+}
+
+
+void Assembler::multu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
+}
+
+
+void Assembler::div(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
+}
+
+
+void Assembler::divu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
+}
+
+
+// Logical.
+
+void Assembler::and_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
+}
+
+
+void Assembler::andi(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(ANDI, rs, rt, j);
+}
+
+
+void Assembler::or_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
+}
+
+
+void Assembler::ori(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(ORI, rs, rt, j);
+}
+
+
+void Assembler::xor_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
+}
+
+
+void Assembler::xori(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(XORI, rs, rt, j);
+}
+
+
+void Assembler::nor(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
+}
+
+
+// Shifts.
+void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+}
+
+
+void Assembler::sllv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
+}
+
+
+void Assembler::srl(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+}
+
+
+void Assembler::srlv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
+}
+
+
+void Assembler::sra(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+}
+
+
+void Assembler::srav(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
+}
+
+
+//------------Memory-instructions-------------
+
+void Assembler::lb(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lbu(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lw(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sb(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sw(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lui(Register rd, int32_t j) {
+  GenInstrImmediate(LUI, zero_reg, rd, j);
+}
+
+
+//-------------Misc-instructions--------------
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code) {
+  ASSERT((code & ~0xfffff) == 0);
+  Instr break_instr = SPECIAL | BREAK | (code << 6);
+  emit(break_instr);
+}
+
+
+void Assembler::tge(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tlt(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tltu(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::teq(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tne(Register rs, Register rt, uint16_t code) {
+  ASSERT(is_uint10(code));
+  Instr instr =
+      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+// Move from HI/LO register.
+
+void Assembler::mfhi(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
+}
+
+
+void Assembler::mflo(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
+}
+
+
+// Set on less than instructions.
+void Assembler::slt(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
+}
+
+
+void Assembler::sltu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
+}
+
+
+void Assembler::slti(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTI, rs, rt, j);
+}
+
+
+void Assembler::sltiu(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTIU, rs, rt, j);
+}
+
+
+//--------Coprocessor-instructions----------------
+
+// Load, store, move.
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::swc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::mtc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MTC1, rt, fs, f0);
+}
+
+
+void Assembler::mthc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::mfc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MFC1, rt, fs, f0);
+}
+
+
+void Assembler::mfhc1(FPURegister fs, Register rt) {
+  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+// Conversions.
+
+void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
+}
+
+
+void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
+}
+
+
+void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
+}
+
+
+void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
+}
+
+
+void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
+}
+
+
+void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
+}
+
+
+void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
+}
+
+
+void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
+}
+
+
+void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
+}
+
+
+void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
+}
+
+
+// Conditions.
+void Assembler::c(FPUCondition cond, SecondaryField fmt,
+    FPURegister ft, FPURegister fs, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  ASSERT((fmt & ~(31 << kRsShift)) == 0);
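+  // In the c.cond.fmt encoding, "3 << 4" sets the compare-class field in
+  // bits 5..4, and cond occupies the low four bits.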
+  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
+      | cc << 8 | 3 << 4 | cond;
+  emit(instr);
+}
+
+
+void Assembler::bc1f(int16_t offset, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::bc1t(int16_t offset, uint16_t cc) {
+  ASSERT(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_debug_code) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+  if (pos == RelocInfo::kNoPosition) return;
+  ASSERT(pos >= 0);
+  current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+  // Write the statement position if it is different from what was written last
+  // time.
+  if (current_statement_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+    written_statement_position_ = current_statement_position_;
+  }
+
+  // Write the position if it is different from what was written last time and
+  // also different from the written statement position.
+  if (current_position_ != written_position_ &&
+      current_position_ != written_statement_position_) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::POSITION, current_position_);
+    written_position_ = current_position_;
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else if (buffer_size_ < 1*MB) {
+    desc.buffer_size = 2*buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1*MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // Setup new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+
+  // On ia32 and ARM pc relative addressing is used, and we thus need to apply
+  // a shift by pc_delta. But on MIPS the target address is loaded directly, so
+  // we do not need to relocate here.
+
+  ASSERT(!overflow());
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes.
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // These modes do not need an entry in the constant pool.
+  }
+  if (rinfo.rmode() != RelocInfo::NONE) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+        !Serializer::enabled() &&
+        !FLAG_debug_code) {
+      return;
+    }
+    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    reloc_info_writer.Write(&rinfo);
+  }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  // Check we have 2 instructions generated by li.
+  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
+                                   (instr2 & kOpcodeMask) == ORI ||
+                                   (instr2 & kOpcodeMask) == LUI)));
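+  // For example, the 32-bit constant 0x12345678 loaded by li is encoded as
+  //   lui rt, 0x1234
+  //   ori rt, rt, 0x5678
+  // and is reassembled below from the two 16-bit immediate halves.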
+  // Interpret these 2 instructions.
+  if (instr1 == nopInstr) {
+    if ((instr2 & kOpcodeMask) == ADDIU) {
+      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
+    } else if ((instr2 & kOpcodeMask) == ORI) {
+      return reinterpret_cast<Address>(instr2 & kImm16Mask);
+    } else if ((instr2 & kOpcodeMask) == LUI) {
+      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
+    }
+  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
+    // 32 bits value.
+    return reinterpret_cast<Address>(
+        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+  }
+
+  // We should never get here.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+  // On MIPS we need to patch the code to generate.
+
+  // First check we have a li.
+  Instr instr2 = instr_at(pc + kInstrSize);
+#ifdef DEBUG
+  Instr instr1 = instr_at(pc);
+
+  // Check we have indeed the result from a li with MustUseAt true.
+  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
+        ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
+                                  (instr2 & kOpcodeMask) == ORI ||
+                                  (instr2 & kOpcodeMask) == LUI)));
+#endif
+
+  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+  uint32_t itarget = reinterpret_cast<uint32_t>(target);
+
+  if (is_int16(itarget)) {
+    // nop
+    // addiu rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
+  } else if (!(itarget & HIMask)) {
+    // nop
+    // ori rt zero_reg j
+    *p = nopInstr;
+    *(p+1) = ORI | rt_code | (itarget & LOMask);
+  } else if (!(itarget & LOMask)) {
+    // nop
+    // lui rt (HIMask & itarget)>>16
+    *p = nopInstr;
+    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+  } else {
+    // lui rt (HIMask & itarget)>>16
+    // ori rt rt, (LOMask & itarget)
+    *p = LUI | rt_code | ((itarget & HIMask)>>16);
+    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+  }
+
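+  // All four cases above rewrite exactly two instructions in place, so the
+  // patched sequence never changes length; flush exactly those two words.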
+  CPU::FlushICache(pc, 2 * sizeof(int32_t));
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
new file mode 100644
index 0000000..4f5ae3e
--- /dev/null
+++ b/src/mips/assembler-mips.h
@@ -0,0 +1,663 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
+#define V8_MIPS_ASSEMBLER_MIPS_H_
+
+#include <stdio.h>
+#include "assembler.h"
+#include "constants-mips.h"
+#include "serialize.h"
+
+using namespace assembler::mips;
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
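+//
+// With the struct, registers are defined with C-style initialization in
+// assembler-mips.cc, for example:
+//   const Register a0 = { 4 };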
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister
+
+// Core register.
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+extern const Register no_reg;
+
+extern const Register zero_reg;
+extern const Register at;
+extern const Register v0;
+extern const Register v1;
+extern const Register a0;
+extern const Register a1;
+extern const Register a2;
+extern const Register a3;
+extern const Register t0;
+extern const Register t1;
+extern const Register t2;
+extern const Register t3;
+extern const Register t4;
+extern const Register t5;
+extern const Register t6;
+extern const Register t7;
+extern const Register s0;
+extern const Register s1;
+extern const Register s2;
+extern const Register s3;
+extern const Register s4;
+extern const Register s5;
+extern const Register s6;
+extern const Register s7;
+extern const Register t8;
+extern const Register t9;
+extern const Register k0;
+extern const Register k1;
+extern const Register gp;
+extern const Register sp;
+extern const Register s8_fp;
+extern const Register ra;
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Coprocessor register.
+struct FPURegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < kNumFPURegister; }
+  bool is(FPURegister creg) const  { return code_ == creg.code_; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+extern const FPURegister no_creg;
+
+extern const FPURegister f0;
+extern const FPURegister f1;
+extern const FPURegister f2;
+extern const FPURegister f3;
+extern const FPURegister f4;
+extern const FPURegister f5;
+extern const FPURegister f6;
+extern const FPURegister f7;
+extern const FPURegister f8;
+extern const FPURegister f9;
+extern const FPURegister f10;
+extern const FPURegister f11;
+extern const FPURegister f12;  // arg
+extern const FPURegister f13;
+extern const FPURegister f14;  // arg
+extern const FPURegister f15;
+extern const FPURegister f16;
+extern const FPURegister f17;
+extern const FPURegister f18;
+extern const FPURegister f19;
+extern const FPURegister f20;
+extern const FPURegister f21;
+extern const FPURegister f22;
+extern const FPURegister f23;
+extern const FPURegister f24;
+extern const FPURegister f25;
+extern const FPURegister f26;
+extern const FPURegister f27;
+extern const FPURegister f28;
+extern const FPURegister f29;
+extern const FPURegister f30;
+extern const FPURegister f31;
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
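+// Note: ReverseCondition returns the condition with its operands swapped
+// (a < b iff b > a); it is not the logical negation, which NegateCondition
+// computes.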
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case Uless:
+      return Ugreater;
+    case Ugreater:
+      return Uless;
+    case Ugreater_equal:
+      return Uless_equal;
+    case Uless_equal:
+      return Ugreater_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  }
+}
+
+
+enum Hint {
+  no_hint = 0
+};
+
+inline Hint NegateHint(Hint hint) {
+  return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+  // Immediate.
+  INLINE(explicit Operand(int32_t immediate,
+         RelocInfo::Mode rmode = RelocInfo::NONE));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // Register.
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  int32_t imm32_;  // Valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
+
+
+// On MIPS we have only one addressing mode, with base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+  explicit MemOperand(Register rm, int16_t offset = 0);
+
+ private:
+  int16_t offset_;
+
+  friend class Assembler;
+};
+
+
+class Assembler : public Malloced {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D).
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+  // Manages the jump elimination optimization if the second parameter is true.
+  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
+    int32_t o = branch_offset(L, jump_elimination_allowed);
+    ASSERT((o & 3) == 0);   // Assert the offset is aligned.
+    return o >> 2;
+  }
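+  // The Label* overloads of the branch instructions below rely on this,
+  // e.g. beq(rs, rt, L) passes branch_offset(L, false) >> 2 so the byte
+  // offset is encoded as an instruction count in the 16-bit immediate field.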
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Difference between address of current opcode and target address offset.
+  static const int kBranchPCOffset = 4;
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  static Address target_address_at(Address pc);
+  static void set_target_address_at(Address pc, Address target);
+
+  // This sets the branch destination (which gets loaded at the call address).
+  // This is for calls and branches within generated code.
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  // This sets the branch destination.
+  // This is for calls and branches to runtime code.
+  inline static void set_external_target_at(Address instruction_payload,
+                                            Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  static const int kCallTargetSize = 3 * kPointerSize;
+  static const int kExternalTargetSize = 3 * kPointerSize;
+
+  // Distance between the instruction referring to the address of the call
+  // target and the return address.
+  static const int kCallTargetAddressOffset = 4 * kInstrSize;
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation.
+
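+  // The canonical MIPS nop is "sll zero_reg, zero_reg, 0", which encodes to
+  // the all-zero instruction word.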
+  void nop() { sll(zero_reg, zero_reg, 0); }
+
+
+  //------- Branch and jump  instructions --------
+  // We don't use likely variant of instructions.
+  void b(int16_t offset);
+  void b(Label* L) { b(branch_offset(L, false)>>2); }
+  void bal(int16_t offset);
+  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+
+  void beq(Register rs, Register rt, int16_t offset);
+  void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, branch_offset(L, false) >> 2);
+  }
+  void bgez(Register rs, int16_t offset);
+  void bgezal(Register rs, int16_t offset);
+  void bgtz(Register rs, int16_t offset);
+  void blez(Register rs, int16_t offset);
+  void bltz(Register rs, int16_t offset);
+  void bltzal(Register rs, int16_t offset);
+  void bne(Register rs, Register rt, int16_t offset);
+  void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, branch_offset(L, false)>>2);
+  }
+
+  // Never use the int16_t b(l)cond version with a branch offset; use the
+  // Label* version instead. See the Twiki for details.
+
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
+  void j(int32_t target);
+  void jal(int32_t target);
+  void jalr(Register rs, Register rd = ra);
+  void jr(Register target);
+
+
+  //-------Data-processing-instructions---------
+
+  // Arithmetic.
+  void add(Register rd, Register rs, Register rt);
+  void addu(Register rd, Register rs, Register rt);
+  void sub(Register rd, Register rs, Register rt);
+  void subu(Register rd, Register rs, Register rt);
+  void mult(Register rs, Register rt);
+  void multu(Register rs, Register rt);
+  void div(Register rs, Register rt);
+  void divu(Register rs, Register rt);
+  void mul(Register rd, Register rs, Register rt);
+
+  void addi(Register rd, Register rs, int32_t j);
+  void addiu(Register rd, Register rs, int32_t j);
+
+  // Logical.
+  void and_(Register rd, Register rs, Register rt);
+  void or_(Register rd, Register rs, Register rt);
+  void xor_(Register rd, Register rs, Register rt);
+  void nor(Register rd, Register rs, Register rt);
+
+  void andi(Register rd, Register rs, int32_t j);
+  void ori(Register rd, Register rs, int32_t j);
+  void xori(Register rd, Register rs, int32_t j);
+  void lui(Register rd, int32_t j);
+
+  // Shifts.
+  void sll(Register rd, Register rt, uint16_t sa);
+  void sllv(Register rd, Register rt, Register rs);
+  void srl(Register rd, Register rt, uint16_t sa);
+  void srlv(Register rd, Register rt, Register rs);
+  void sra(Register rd, Register rt, uint16_t sa);
+  void srav(Register rd, Register rt, Register rs);
+
+
+  //------------Memory-instructions-------------
+
+  void lb(Register rd, const MemOperand& rs);
+  void lbu(Register rd, const MemOperand& rs);
+  void lw(Register rd, const MemOperand& rs);
+  void sb(Register rd, const MemOperand& rs);
+  void sw(Register rd, const MemOperand& rs);
+
+
+  //-------------Misc-instructions--------------
+
+  // Break / Trap instructions.
+  void break_(uint32_t code);
+  void tge(Register rs, Register rt, uint16_t code);
+  void tgeu(Register rs, Register rt, uint16_t code);
+  void tlt(Register rs, Register rt, uint16_t code);
+  void tltu(Register rs, Register rt, uint16_t code);
+  void teq(Register rs, Register rt, uint16_t code);
+  void tne(Register rs, Register rt, uint16_t code);
+
+  // Move from HI/LO register.
+  void mfhi(Register rd);
+  void mflo(Register rd);
+
+  // Set on less than.
+  void slt(Register rd, Register rs, Register rt);
+  void sltu(Register rd, Register rs, Register rt);
+  void slti(Register rd, Register rs, int32_t j);
+  void sltiu(Register rd, Register rs, int32_t j);
+
+
+  //--------Coprocessor-instructions----------------
+
+  // Load, store, and move.
+  void lwc1(FPURegister fd, const MemOperand& src);
+  void ldc1(FPURegister fd, const MemOperand& src);
+
+  void swc1(FPURegister fs, const MemOperand& dst);
+  void sdc1(FPURegister fs, const MemOperand& dst);
+
+  // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
+  // executed first, followed by the MTHC1.
+  void mtc1(FPURegister fs, Register rt);
+  void mthc1(FPURegister fs, Register rt);
+  void mfc1(FPURegister fs, Register rt);
+  void mfhc1(FPURegister fs, Register rt);
+
+  // Conversion.
+  void cvt_w_s(FPURegister fd, FPURegister fs);
+  void cvt_w_d(FPURegister fd, FPURegister fs);
+
+  void cvt_l_s(FPURegister fd, FPURegister fs);
+  void cvt_l_d(FPURegister fd, FPURegister fs);
+
+  void cvt_s_w(FPURegister fd, FPURegister fs);
+  void cvt_s_l(FPURegister fd, FPURegister fs);
+  void cvt_s_d(FPURegister fd, FPURegister fs);
+
+  void cvt_d_w(FPURegister fd, FPURegister fs);
+  void cvt_d_l(FPURegister fd, FPURegister fs);
+  void cvt_d_s(FPURegister fd, FPURegister fs);
+
+  // Conditions and branches.
+  void c(FPUCondition cond, SecondaryField fmt,
+         FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+  void bc1f(int16_t offset, uint16_t cc = 0);
+  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+  void bc1t(int16_t offset, uint16_t cc = 0);
+  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+
+
+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
+  // Debugging.
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  int32_t pc_offset() const { return pc_ - buffer_; }
+  int32_t current_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }
+
+  // Check if there are fewer than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+ protected:
+  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions.
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Check if an instruction is a branch of some kind.
+  bool is_branch(Instr instr);
+
+  // Decode branch instruction at pos and return branch target pos.
+  int target_at(int32_t pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos.
+  void target_at_put(int32_t pos, int32_t target_pos);
+
+  // Say if we need to relocate with this mode.
+  bool MustUseAt(RelocInfo::Mode rmode);
+
+  // Record reloc info for current pc_.
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ private:
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes.
+  static const int kBufferCheckInterval = 1*KB/2;
+
+  // Code generation.
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
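+  // With kInstrSize == 4, this leaves headroom for up to 8 instructions
+  // after a single CheckBuffer() call.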
+  byte* pc_;  // The program counter - moves forward.
+
+  // Relocation information generation.
+  // Each relocation is encoded as a variable size value.
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Source position information.
+  int current_position_;
+  int current_statement_position_;
+  int written_position_;
+  int written_statement_position_;
+
+  // Code emission.
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+
+  // Instruction generation.
+  // We have 3 different kinds of encoding layouts on MIPS.
+  // However, due to the many different types of objects encoded in the same
+  // fields, we have quite a few aliases for each mode.
+  // Using the same structure to refer to Register and FPURegister would spare
+  // a few aliases, but mixing both does not look clean to me.
+  // Anyway we could surely implement this differently.
+
+  void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        Register rd,
+                        uint16_t sa = 0,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        FPURegister ft,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         Register rt,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         SecondaryField SF,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register r1,
+                         FPURegister r2,
+                         int32_t  j);
+
+
+  void GenInstrJump(Opcode opcode,
+                     uint32_t address);
+
+
+  // Labels.
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+  void next(Label* L);
+
+  friend class RegExpMacroAssemblerMIPS;
+  friend class RelocInfo;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
+
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
new file mode 100644
index 0000000..3bd42ed
--- /dev/null
+++ b/src/mips/builtins-mips.cc
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
new file mode 100644
index 0000000..2a77715
--- /dev/null
+++ b/src/mips/codegen-mips-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
+#define V8_MIPS_CODEGEN_MIPS_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ b(&entry_label_); }
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
+
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
new file mode 100644
index 0000000..5a27c28
--- /dev/null
+++ b/src/mips/codegen-mips.cc
@@ -0,0 +1,501 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "compiler.h"
+
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+
+void DeferredCode::SaveRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
+      masm_(masm),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      cc_reg_(cc_always),
+      state_(NULL),
+      function_return_is_shadowed_(false) {
+}
+
+
+// Calling conventions:
+// s8_fp: caller's frame pointer
+// sp: stack pointer
+// a1: called JS function
+// cp: callee's context
+
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on MIPS, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+// On entry a0 and a1 are the values to be compared. On exit v0 is 0,
+// positive or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x765);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x790);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x808);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x815);
+}
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_termination_exception,
+                              Label* throw_out_of_memory_exception,
+                              bool do_gc,
+                              bool always_allocate) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x826);
+}
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x831);
+}
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  UNIMPLEMENTED_MIPS();
+  // Load a result.
+  __ li(v0, Operand(0x1234));
+  __ jr(ra);
+  // Return
+  __ nop();
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary.  Uses a1 for the object, a0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x845);
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x851);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x857);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x863);
+}
+
+
+const char* CompareStub::GetName() {
+  UNIMPLEMENTED_MIPS();
+  return NULL;  // UNIMPLEMENTED RETURN
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the two parameters in a unique 16 bit value.
+  // Unlike on ARM, the emulated MIPS conditions are small integers (see
+  // Condition in constants-mips.h), so the condition is shifted left to
+  // leave bit 0 free for the strict flag.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+  return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
new file mode 100644
index 0000000..05138bc
--- /dev/null
+++ b/src/mips/codegen-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_CODEGEN_MIPS_H_
+#define V8_MIPS_CODEGEN_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class CompilationInfo;
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair).  It is threaded through the
+// call stack.  Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state has its own typeof state and pair of branch
+  // labels.
+  CodeGenState(CodeGenerator* owner,
+               JumpTarget* true_target,
+               JumpTarget* false_target);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  TypeofState typeof_state() const { return typeof_state_; }
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
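+// For example (a sketch of typical use inside a CodeGenerator visitor):
+//
+//   { CodeGenState new_state(this, &if_true, &if_false);  // pushed here
+//     // ... compile a condition with the new branch targets ...
+//   }  // destructor runs: previous state is restored
+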
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Compilation mode.  Either the compiler is used as the primary
+  // compiler and needs to set up everything, or the compiler is used as
+  // the secondary compiler for split compilation and has to handle
+  // bailouts.
+  enum Mode {
+    PRIMARY,
+    SECONDARY
+  };
+
+  // Takes a function literal, generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(CompilationInfo* info);
+
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(CompilationInfo* info);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       CompilationInfo* info);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              FunctionLiteral* lit,
+                              bool is_toplevel,
+                              Handle<Script> script);
+
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+  VirtualFrame* frame() const { return frame_; }
+  inline Handle<Script> script();
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  static const int kUnknownIntValue = -1;
+
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceLength = 6;
+
+ private:
+  // Construction/Destruction.
+  explicit CodeGenerator(MacroAssembler* masm);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors.
+  inline bool is_eval();
+  Scope* scope() const { return scope_; }
+
+  // Generating deferred code.
+  void ProcessDeferred();
+
+  // State
+  bool has_cc() const  { return cc_reg_ != cc_always; }
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  JumpTarget* true_target() const  { return state_->true_target(); }
+  JumpTarget* false_target() const  { return state_->false_target(); }
+
+  // We don't track loop nesting level on MIPS yet.
+  int loop_nesting() const { return 0; }
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Main code generation function
+  void Generate(CompilationInfo* info, Mode mode);
+
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
+  static Handle<Code> ComputeLazyCompile(int argc);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for construct call checks.
+  void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the class and value fields of an object.
+  void GenerateClassOf(ZoneList<Expression*>* args);
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  void GenerateIsObject(ZoneList<Expression*>* args);
+  void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+  void GenerateStringAdd(ZoneList<Expression*>* args);
+  void GenerateSubString(ZoneList<Expression*>* args);
+  void GenerateStringCompare(ZoneList<Expression*>* args);
+  void GenerateRegExpExec(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
+  // Methods used to track which source position the generated code
+  // corresponds to. Source positions are collected by the assembler and
+  // emitted with the relocation information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Statement* node);
+  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+
+  Handle<Script> script_;
+  List<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  CompilationInfo* info_;
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  Condition cc_reg_;
+  CodeGenState* state_;
+
+  // Jump targets
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (i.e. jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class FastCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_CODEGEN_MIPS_H_
+
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
new file mode 100644
index 0000000..a5ef9f8
--- /dev/null
+++ b/src/mips/constants-mips.cc
@@ -0,0 +1,323 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "constants-mips.h"
+
+namespace assembler {
+namespace mips {
+
+namespace v8i = v8::internal;
+
+
+// -----------------------------------------------------------------------------
+// Registers
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+  "zero_reg",
+  "at",
+  "v0", "v1",
+  "a0", "a1", "a2", "a3",
+  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9",
+  "k0", "k1",
+  "gp",
+  "sp",
+  "fp",
+  "ra",
+  "LO", "HI",
+  "pc"
+};
+
+// List of alias names which can be used when referring to MIPS registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+  {0, "zero"},
+  {23, "cp"},
+  {30, "s8"},
+  {30, "s8_fp"},
+  {kInvalidRegister, NULL}
+};
+
+const char* Registers::Name(int reg) {
+  const char* result;
+  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+    result = names_[reg];
+  } else {
+    result = "noreg";
+  }
+  return result;
+}
+
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].reg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].reg;
+    }
+    i++;
+  }
+
+  // No register with the requested name found.
+  return kInvalidRegister;
+}
+
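+// A usage sketch: Registers::Number("a0") returns 4,
+// Registers::Number("zero") resolves through the alias table to 0, and an
+// unknown name yields kInvalidRegister.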
+
+const char* FPURegister::names_[kNumFPURegister] = {
+  "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
+  "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+  "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+};
+
+// List of alias names which can be used when referring to MIPS FPU registers.
+const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+  {kInvalidRegister, NULL}
+};
+
+const char* FPURegister::Name(int creg) {
+  const char* result;
+  if ((0 <= creg) && (creg < kNumFPURegister)) {
+    result = names_[creg];
+  } else {
+    result = "nocreg";
+  }
+  return result;
+}
+
+
+int FPURegister::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumFPURegister; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].creg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].creg;
+    }
+    i++;
+  }
+
+  // No FPU register with the requested name found.
+  return kInvalidFPURegister;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instruction
+
+bool Instruction::IsForbiddenInBranchDelay() {
+  int op = OpcodeFieldRaw();
+  switch (op) {
+    case J:
+    case JAL:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+      return true;
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BLTZ:
+        case BGEZ:
+        case BLTZAL:
+        case BGEZAL:
+          return true;
+        default:
+          return false;
+      };
+      break;
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          return true;
+        default:
+          return false;
+      };
+      break;
+    default:
+      return false;
+  };
+}
+
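+// Background: the instruction in a MIPS branch delay slot executes before
+// the branch takes effect; for example, in
+//   beq a0, a1, target
+//   nop                    # delay slot
+// the nop runs whether or not the branch is taken, so the delay slot must
+// not itself hold one of the branch or jump instructions listed above.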
+
+bool Instruction::IsLinkingInstruction() {
+  int op = OpcodeFieldRaw();
+  switch (op) {
+    case JAL:
+      return true;
+    case REGIMM:
+      // BGEZAL and BLTZAL are encoded in the rt field of REGIMM, not in
+      // the opcode field.
+      switch (RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+        default:
+          return false;
+      };
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JALR:
+          return true;
+        default:
+          return false;
+      };
+    default:
+      return false;
+  };
+}
+
+
+bool Instruction::IsTrap() {
+  if (OpcodeFieldRaw() != SPECIAL) {
+    return false;
+  } else {
+    switch (FunctionFieldRaw()) {
+      case BREAK:
+      case TGE:
+      case TGEU:
+      case TLT:
+      case TLTU:
+      case TEQ:
+      case TNE:
+        return true;
+      default:
+        return false;
+    };
+  }
+}
+
+
+Instruction::Type Instruction::InstructionType() const {
+  switch (OpcodeFieldRaw()) {
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+        case BREAK:
+        case SLL:
+        case SRL:
+        case SRA:
+        case SLLV:
+        case SRLV:
+        case SRAV:
+        case MFHI:
+        case MFLO:
+        case MULT:
+        case MULTU:
+        case DIV:
+        case DIVU:
+        case ADD:
+        case ADDU:
+        case SUB:
+        case SUBU:
+        case AND:
+        case OR:
+        case XOR:
+        case NOR:
+        case SLT:
+        case SLTU:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+          return kRegisterType;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (FunctionFieldRaw()) {
+        case MUL:
+          return kRegisterType;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case COP1:    // Coprocessor instructions.
+      // BC1 is encoded in the rs field, so mask it out directly here
+      // (RsFieldRaw() cannot be used: its ASSERT calls InstructionType()).
+      switch (InstructionBits() & kRsFieldMask) {
+        case BC1:   // Branch on coprocessor condition.
+          return kImmediateType;
+        default:
+          return kRegisterType;
+      };
+      break;
+    // 16-bit immediate type instructions, e.g.: addi dest, src, imm16.
+    case REGIMM:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case ADDI:
+    case ADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+    case LB:
+    case LW:
+    case LBU:
+    case SB:
+    case SW:
+    case LWC1:
+    case LDC1:
+    case SWC1:
+    case SDC1:
+      return kImmediateType;
+    // 26-bit immediate type instructions, e.g.: j imm26.
+    case J:
+    case JAL:
+      return kJumpType;
+    default:
+      UNREACHABLE();
+  };
+  return kUnsupported;
+}
+
+} }   // namespace assembler::mips
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
new file mode 100644
index 0000000..d0fdf88
--- /dev/null
+++ b/src/mips/constants-mips.h
@@ -0,0 +1,525 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef  V8_MIPS_CONSTANTS_H_
+#define  V8_MIPS_CONSTANTS_H_
+
+#include "checks.h"
+
+// UNIMPLEMENTED_MIPS and UNSUPPORTED_MIPS macros.
+#define UNIMPLEMENTED_MIPS()                                                  \
+  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",    \
+                       __FILE__, __LINE__, __func__)
+#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate MIPS32 instructions.
+//
+// See: MIPS32 Architecture For Programmers
+//      Volume II: The MIPS32 Instruction Set
+// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
+
+namespace assembler {
+namespace mips {
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegister.
+
+// Number of general purpose registers.
+static const int kNumRegisters = 32;
+static const int kInvalidRegister = -1;
+
+// Number of registers with HI, LO, and pc.
+static const int kNumSimuRegisters = 35;
+
+// In the simulator, the PC is mapped to register index 34 (after LO and HI).
+static const int kPCRegister = 34;
+
+// Number of coprocessor (FPU) registers.
+static const int kNumFPURegister = 32;
+static const int kInvalidFPURegister = -1;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int reg;
+    const char *name;
+  };
+
+  static const int32_t kMaxValue = 0x7fffffff;
+  static const int32_t kMinValue = 0x80000000;
+
+ private:
+
+  static const char* names_[kNumSimuRegisters];
+  static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegister {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int creg;
+    const char *name;
+  };
+
+ private:
+
+  static const char* names_[kNumFPURegister];
+  static const RegisterAlias aliases_[];
+};
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On MIPS all instructions are 32 bits.
+typedef int32_t Instr;
+
+typedef unsigned char byte_;
+
+// Special Software Interrupt codes when used in the presence of the MIPS
+// simulator.
+enum SoftwareInterruptCodes {
+  // Transition to C code.
+  call_rt_redirected = 0xfffff
+};
+
+// ----- Fields offset and length.
+static const int kOpcodeShift   = 26;
+static const int kOpcodeBits    = 6;
+static const int kRsShift       = 21;
+static const int kRsBits        = 5;
+static const int kRtShift       = 16;
+static const int kRtBits        = 5;
+static const int kRdShift       = 11;
+static const int kRdBits        = 5;
+static const int kSaShift       = 6;
+static const int kSaBits        = 5;
+static const int kFunctionShift = 0;
+static const int kFunctionBits  = 6;
+
+static const int kImm16Shift = 0;
+static const int kImm16Bits  = 16;
+static const int kImm26Shift = 0;
+static const int kImm26Bits  = 26;
+
+static const int kFsShift       = 11;
+static const int kFsBits        = 5;
+static const int kFtShift       = 16;
+static const int kFtBits        = 5;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+static const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+static const int  kFunctionFieldMask =
+    ((1 << kFunctionBits) - 1) << kFunctionShift;
+// Misc masks.
+static const int  HIMask        =   0xffff << 16;
+static const int  LOMask        =   0xffff;
+static const int  signMask      =   0x80000000;
+
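+// For example, the rt field of an instruction word instr can be read
+// either in place or shifted down to a register number (a sketch):
+//   int rt_raw = instr & kRtFieldMask;                 // still at bit 16
+//   int rt     = (instr & kRtFieldMask) >> kRtShift;   // 0..31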
+
+// ----- MIPS Opcodes and Function Fields.
+// We use this presentation to stay close to the table representation in
+// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
+enum Opcode {
+  SPECIAL   =   0 << kOpcodeShift,
+  REGIMM    =   1 << kOpcodeShift,
+
+  J         =   ((0 << 3) + 2) << kOpcodeShift,
+  JAL       =   ((0 << 3) + 3) << kOpcodeShift,
+  BEQ       =   ((0 << 3) + 4) << kOpcodeShift,
+  BNE       =   ((0 << 3) + 5) << kOpcodeShift,
+  BLEZ      =   ((0 << 3) + 6) << kOpcodeShift,
+  BGTZ      =   ((0 << 3) + 7) << kOpcodeShift,
+
+  ADDI      =   ((1 << 3) + 0) << kOpcodeShift,
+  ADDIU     =   ((1 << 3) + 1) << kOpcodeShift,
+  SLTI      =   ((1 << 3) + 2) << kOpcodeShift,
+  SLTIU     =   ((1 << 3) + 3) << kOpcodeShift,
+  ANDI      =   ((1 << 3) + 4) << kOpcodeShift,
+  ORI       =   ((1 << 3) + 5) << kOpcodeShift,
+  XORI      =   ((1 << 3) + 6) << kOpcodeShift,
+  LUI       =   ((1 << 3) + 7) << kOpcodeShift,
+
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
+  BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
+  BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
+  BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
+  BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
+
+  SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
+
+  LB        =   ((4 << 3) + 0) << kOpcodeShift,
+  LW        =   ((4 << 3) + 3) << kOpcodeShift,
+  LBU       =   ((4 << 3) + 4) << kOpcodeShift,
+  SB        =   ((5 << 3) + 0) << kOpcodeShift,
+  SW        =   ((5 << 3) + 3) << kOpcodeShift,
+
+  LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
+  LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
+
+  SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
+  SDC1      =   ((7 << 3) + 5) << kOpcodeShift
+};
+
+enum SecondaryField {
+  // SPECIAL Encoding of Function Field.
+  SLL       =   ((0 << 3) + 0),
+  SRL       =   ((0 << 3) + 2),
+  SRA       =   ((0 << 3) + 3),
+  SLLV      =   ((0 << 3) + 4),
+  SRLV      =   ((0 << 3) + 6),
+  SRAV      =   ((0 << 3) + 7),
+
+  JR        =   ((1 << 3) + 0),
+  JALR      =   ((1 << 3) + 1),
+  BREAK     =   ((1 << 3) + 5),
+
+  MFHI      =   ((2 << 3) + 0),
+  MFLO      =   ((2 << 3) + 2),
+
+  MULT      =   ((3 << 3) + 0),
+  MULTU     =   ((3 << 3) + 1),
+  DIV       =   ((3 << 3) + 2),
+  DIVU      =   ((3 << 3) + 3),
+
+  ADD       =   ((4 << 3) + 0),
+  ADDU      =   ((4 << 3) + 1),
+  SUB       =   ((4 << 3) + 2),
+  SUBU      =   ((4 << 3) + 3),
+  AND       =   ((4 << 3) + 4),
+  OR        =   ((4 << 3) + 5),
+  XOR       =   ((4 << 3) + 6),
+  NOR       =   ((4 << 3) + 7),
+
+  SLT       =   ((5 << 3) + 2),
+  SLTU      =   ((5 << 3) + 3),
+
+  TGE       =   ((6 << 3) + 0),
+  TGEU      =   ((6 << 3) + 1),
+  TLT       =   ((6 << 3) + 2),
+  TLTU      =   ((6 << 3) + 3),
+  TEQ       =   ((6 << 3) + 4),
+  TNE       =   ((6 << 3) + 6),
+
+  // SPECIAL2 Encoding of Function Field.
+  MUL       =   ((0 << 3) + 2),
+
+  // REGIMM  encoding of rt Field.
+  BLTZ      =   ((0 << 3) + 0) << 16,
+  BGEZ      =   ((0 << 3) + 1) << 16,
+  BLTZAL    =   ((2 << 3) + 0) << 16,
+  BGEZAL    =   ((2 << 3) + 1) << 16,
+
+  // COP1 Encoding of rs Field.
+  MFC1      =   ((0 << 3) + 0) << 21,
+  MFHC1     =   ((0 << 3) + 3) << 21,
+  MTC1      =   ((0 << 3) + 4) << 21,
+  MTHC1     =   ((0 << 3) + 7) << 21,
+  BC1       =   ((1 << 3) + 0) << 21,
+  S         =   ((2 << 3) + 0) << 21,
+  D         =   ((2 << 3) + 1) << 21,
+  W         =   ((2 << 3) + 4) << 21,
+  L         =   ((2 << 3) + 5) << 21,
+  PS        =   ((2 << 3) + 6) << 21,
+  // COP1 Encoding of Function Field When rs=S.
+  CVT_D_S   =   ((4 << 3) + 1),
+  CVT_W_S   =   ((4 << 3) + 4),
+  CVT_L_S   =   ((4 << 3) + 5),
+  CVT_PS_S  =   ((4 << 3) + 6),
+  // COP1 Encoding of Function Field When rs=D.
+  CVT_S_D   =   ((4 << 3) + 0),
+  CVT_W_D   =   ((4 << 3) + 4),
+  CVT_L_D   =   ((4 << 3) + 5),
+  // COP1 Encoding of Function Field When rs=W or L.
+  CVT_S_W   =   ((4 << 3) + 0),
+  CVT_D_W   =   ((4 << 3) + 1),
+  CVT_S_L   =   ((4 << 3) + 0),
+  CVT_D_L   =   ((4 << 3) + 1),
+  // COP1 Encoding of Function Field When rs=PS.
+
+  NULLSF    =   0
+};
+
+
+// ----- Emulated conditions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+enum Condition {
+  // Any value < 0 is considered no_condition.
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  Uless         =  2,
+  Ugreater_equal=  3,
+  equal         =  4,
+  not_equal     =  5,
+  Uless_equal   =  6,
+  Ugreater      =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  cc_always     = 16,
+
+  // aliases
+  carry         = Uless,
+  not_carry     = Ugreater_equal,
+  zero          = equal,
+  eq            = equal,
+  not_zero      = not_equal,
+  ne            = not_equal,
+  sign          = negative,
+  not_sign      = positive,
+
+  cc_default    = no_condition
+};
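+
+// These emulated conditions have no direct MIPS encoding; the macro
+// assembler materializes them. A "branch if Uless" on (a0, a1), for
+// example, would typically be emitted as (a sketch):
+//   sltu  at, a0, a1            # at = 1 if a0 < a1 (unsigned)
+//   bne   at, zero_reg, target  # branch if the comparison was true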
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+  F,    // False
+  UN,   // Unordered
+  EQ,   // Equal
+  UEQ,  // Unordered or Equal
+  OLT,  // Ordered and Less Than
+  ULT,  // Unordered or Less Than
+  OLE,  // Ordered and Less Than or Equal
+  ULE   // Unordered or Less Than or Equal
+};
+
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = SPECIAL | BREAK | (call_rt_redirected << 6);
+// A nop instruction. (Encoding of sll 0 0 0).
+const Instr nopInstr = 0;
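+
+// Decoded, rtCallRedirInstr is "break 0xfffff": opcode SPECIAL, function
+// field BREAK, and the 20-bit code field (bits 6..25) holding
+// call_rt_redirected, which lets the MIPS simulator recognize the
+// transition to C code.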
+
+class Instruction {
+ public:
+  enum {
+    kInstructionSize = 4,
+    kInstructionSizeLog2 = 2,
+    // On MIPS, PC cannot actually be directly accessed. We behave as if PC
+    // was always the value of the current instruction being executed.
+    kPCReadOffset = 0
+  };
+
+  // Get the raw instruction bits.
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const {
+    return (InstructionBits() >> nr) & 1;
+  }
+
+  // Read a bit field out of the instruction bits.
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
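+  // For example, Bits(15, 0) extracts the low 16 bits: the mask is
+  // (2 << (15 - 0)) - 1 == 0xffff, i.e. (1 << (hi - lo + 1)) - 1.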
+
+  // Instruction type.
+  enum Type {
+    kRegisterType,
+    kImmediateType,
+    kJumpType,
+    kUnsupported = -1
+  };
+
+  // Get the encoding type of the instruction.
+  Type InstructionType() const;
+
+
+  // Accessors for the different named fields used in the MIPS encoding.
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(
+        Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
+  }
+
+  inline int RsField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRsShift + kRsBits - 1, kRsShift);
+  }
+
+  inline int RtField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRtShift + kRtBits - 1, kRtShift);
+  }
+
+  inline int RdField() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int SaField() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return Bits(kSaShift + kSaBits - 1, kSaShift);
+  }
+
+  inline int FunctionField() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+  }
+
+  inline int FsField() const {
+    return Bits(kFsShift + kFsBits - 1, kFsShift);
+  }
+
+  inline int FtField() const {
+    return Bits(kFtShift + kFtBits - 1, kFtShift);
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode OpcodeFieldRaw() const {
+    return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+  }
+
+  inline int RsFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRsFieldMask;
+  }
+
+  inline int RtFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRtFieldMask;
+  }
+
+  inline int RdFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return InstructionBits() & kRdFieldMask;
+  }
+
+  inline int SaFieldRaw() const {
+    ASSERT(InstructionType() == kRegisterType);
+    return InstructionBits() & kSaFieldMask;
+  }
+
+  inline int FunctionFieldRaw() const {
+    return InstructionBits() & kFunctionFieldMask;
+  }
+
+  // Get the secondary field according to the opcode.
+  inline int SecondaryField() const {
+    Opcode op = OpcodeFieldRaw();
+    switch (op) {
+      case SPECIAL:
+      case SPECIAL2:
+        return FunctionField();
+      case COP1:
+        return RsField();
+      case REGIMM:
+        return RtField();
+      default:
+        return NULLSF;
+    }
+  }
+
+  inline int32_t Imm16Field() const {
+    ASSERT(InstructionType() == kImmediateType);
+    return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+  }
+
+  inline int32_t Imm26Field() const {
+    ASSERT(InstructionType() == kJumpType);
+    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+  }
+
+  // Say if the instruction should not be used in a branch delay slot.
+  bool IsForbiddenInBranchDelay();
+  // Say if the instruction 'links', e.g. jal, bal.
+  bool IsLinkingInstruction();
+  // Say if the instruction is a break or a trap.
+  bool IsTrap();
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte_* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
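+  // Typical use (a sketch):
+  //   Instruction* instr = Instruction::At(pc);
+  //   if (instr->IsTrap()) { /* handle break or trap */ }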
+
+ private:
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+static const int kArgsSlotsSize  = 4 * Instruction::kInstructionSize;
+static const int kArgsSlotsNum   = 4;
+
+static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+
+static const int kDoubleAlignment = 2 * 8;
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+} }   // namespace assembler::mips
+
+#endif    // #ifndef V8_MIPS_CONSTANTS_H_
+
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
new file mode 100644
index 0000000..f592257
--- /dev/null
+++ b/src/mips/cpu-mips.cc
@@ -0,0 +1,69 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for MIPS independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifdef __mips
+#include <asm/cachectl.h>
+#endif  // #ifdef __mips
+
+#include "v8.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+  // Nothing to do.
+}
+
+void CPU::FlushICache(void* start, size_t size) {
+#ifdef __mips
+  int res;
+
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+  res = syscall(__NR_cacheflush, start, size, ICACHE);
+
+  if (res) {
+    V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
+  }
+
+#endif    // #ifdef __mips
+}
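+
+// Generated code must reach the instruction cache before it is executed;
+// a typical call site (a sketch) is
+//   CPU::FlushICache(code->instruction_start(), code->instruction_size());
+// right after a code object has been created or patched.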
+
+
+void CPU::DebugBreak() {
+#ifdef __mips
+  asm volatile("break");
+#endif  // #ifdef __mips
+}
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
new file mode 100644
index 0000000..772bcc0
--- /dev/null
+++ b/src/mips/debug-mips.cc
@@ -0,0 +1,112 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// A debug break in the exit code is identified by a call.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
new file mode 100644
index 0000000..cab72d1
--- /dev/null
+++ b/src/mips/disasm-mips.cc
@@ -0,0 +1,784 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte_* pc = begin; pc < end;) {
+//     char buffer[128];
+//     buffer[0] = '\0';
+//     byte_* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "constants-mips.h"
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace assembler {
+namespace mips {
+
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative descriptions.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter,
+          v8::internal::Vector<char> out_buffer)
+    : converter_(converter),
+      out_buffer_(out_buffer),
+      out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte_* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintCRegister(int creg);
+  void PrintRs(Instruction* instr);
+  void PrintRt(Instruction* instr);
+  void PrintRd(Instruction* instr);
+  void PrintFs(Instruction* instr);
+  void PrintFt(Instruction* instr);
+  void PrintFd(Instruction* instr);
+  void PrintSa(Instruction* instr);
+  void PrintFunction(Instruction* instr);
+  void PrintSecondaryField(Instruction* instr);
+  void PrintUImm16(Instruction* instr);
+  void PrintSImm16(Instruction* instr);
+  void PrintXImm16(Instruction* instr);
+  void PrintImm26(Instruction* instr);
+  void PrintCode(Instruction* instr);   // For break and trap instructions.
+  // Printing of instruction name.
+  void PrintInstructionName(Instruction* instr);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatCRegister(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+
+  // Each of these functions decodes one particular instruction type.
+  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  v8::internal::Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+  out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+void Decoder::PrintRs(Instruction* instr) {
+  int reg = instr->RsField();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRt(Instruction* instr) {
+  int reg = instr->RtField();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRd(Instruction* instr) {
+  int reg = instr->RdField();
+  PrintRegister(reg);
+}
+
+
+// Print the Cregister name according to the active name converter.
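+// (The NameOfXMMRegister hook is reused here to name MIPS FPU registers;
+// see the NameConverter implementation at the end of this file.)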
+void Decoder::PrintCRegister(int creg) {
+  Print(converter_.NameOfXMMRegister(creg));
+}
+
+
+void Decoder::PrintFs(Instruction* instr) {
+  int creg = instr->RsField();
+  PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFt(Instruction* instr) {
+  int creg = instr->RtField();
+  PrintCRegister(creg);
+}
+
+
+void Decoder::PrintFd(Instruction* instr) {
+  int creg = instr->RdField();
+  PrintCRegister(creg);
+}
+
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa(Instruction* instr) {
+  int sa = instr->SaField();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", sa);
+}
+
+
+// Print 16-bit unsigned immediate value.
+void Decoder::PrintUImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%u", imm);
+}
+
+
+// Print 16-bit signed immediate value.
+void Decoder::PrintSImm16(Instruction* instr) {
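+  // Shifting left then right by 16 sign-extends the 16-bit field.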
+  int32_t imm = (instr->Imm16Field() << 16) >> 16;
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", imm);
+}
+
+
+// Print 16-bit immediate value in hexadecimal.
+void Decoder::PrintXImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "0x%x", imm);
+}
+
+
+// Print 26-bit immediate value.
+void Decoder::PrintImm26(Instruction* instr) {
+  int32_t imm = instr->Imm26Field();
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%d", imm);
+}
+
+
+// Print the code field of a break or trap instruction.
+void Decoder::PrintCode(Instruction* instr) {
+  if (instr->OpcodeFieldRaw() != SPECIAL)
+    return;  // Not a break or trap instruction.
+  switch (instr->FunctionFieldRaw()) {
+    case BREAK: {
+      int32_t code = instr->Bits(25, 6);
+      out_buffer_pos_ +=
+          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+      break;
+    }
+    case TGE:
+    case TGEU:
+    case TLT:
+    case TLTU:
+    case TEQ:
+    case TNE: {
+      int32_t code = instr->Bits(15, 6);
+      out_buffer_pos_ +=
+          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+      break;
+    }
+    default:  // Not a break or trap instruction.
+      break;
+  };
+}
+
+
+// Printing of instruction name.
+void Decoder::PrintInstructionName(Instruction* instr) {
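+  // Currently a no-op: mnemonics are emitted as part of the Format() strings.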
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  ASSERT(format[0] == 'r');
+  if (format[1] == 's') {  // 'rs: Rs register
+    int reg = instr->RsField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'rt: rt register
+    int reg = instr->RtField();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'rd: rd register
+    int reg = instr->RdField();
+    PrintRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Handle all Cregister based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+  ASSERT(format[0] == 'f');
+  if (format[1] == 's') {  // 'fs: fs register
+    int reg = instr->RsField();
+    PrintCRegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'ft: ft register
+    int reg = instr->RtField();
+    PrintCRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'fd: fd register
+    int reg = instr->RdField();
+    PrintCRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.)  FormatOption returns the number of
+// characters that were consumed from the formatting string.
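+//
+// For example, with the format "addu 'rd, 'rs, 'rt", Format() consumes each
+// quote and FormatOption is then called with "rd, ...", "rs, ..." and "rt";
+// each call prints one register name and returns 2, the length of the
+// consumed option.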
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'c': {   // 'code for break or trap instructions
+      ASSERT(STRING_STARTS_WITH(format, "code"));
+      PrintCode(instr);
+      return 4;
+    }
+    case 'i': {   // 'imm16s, 'imm16u, 'imm16x, or 'imm26.
+      if (format[3] == '1') {
+        ASSERT(STRING_STARTS_WITH(format, "imm16"));
+        if (format[5] == 's') {
+          ASSERT(STRING_STARTS_WITH(format, "imm16s"));
+          PrintSImm16(instr);
+        } else if (format[5] == 'u') {
+          ASSERT(STRING_STARTS_WITH(format, "imm16u"));
+          PrintUImm16(instr);
+        } else {
+          ASSERT(STRING_STARTS_WITH(format, "imm16x"));
+          PrintXImm16(instr);
+        }
+        return 6;
+      } else {
+        ASSERT(STRING_STARTS_WITH(format, "imm26"));
+        PrintImm26(instr);
+        return 5;
+      }
+    }
+    case 'r': {   // 'r: registers
+      return FormatRegister(instr, format);
+    }
+    case 'f': {   // 'f: Cregisters
+      return FormatCRegister(instr, format);
+    }
+    case 's': {   // 'sa
+      ASSERT(STRING_STARTS_WITH(format, "sa"));
+      PrintSa(instr);
+      return 2;
+    }
+  };
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_]  = '\0';
+}
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" in place of the instruction mnemonic.
+void Decoder::Unknown(Instruction* instr) {
+  Format(instr, "unknown");
+}
+
+
+void Decoder::DecodeTypeRegister(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case COP1:    // Coprocessor instructions
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+          Format(instr, "mfc1 'rt, 'fs");
+          break;
+        case MFHC1:
+          Format(instr, "mfhc1 'rt, 'fs");
+          break;
+        case MTC1:
+          Format(instr, "mtc1 'rt, 'fs");
+          break;
+        case MTHC1:
+          Format(instr, "mthc1 'rt, 'fs");
+          break;
+        case S:
+        case D:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_W:
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CVT_D_W:   // Convert word to double.
+              Format(instr, "cvt.d.w  'fd, 'fs");
+              break;
+            default:
+              UNREACHABLE();
+          };
+          break;
+        case L:
+        case PS:
+          UNIMPLEMENTED_MIPS();
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+          Format(instr, "jr   'rs");
+          break;
+        case JALR:
+          Format(instr, "jalr 'rs");
+          break;
+        case SLL:
+          if (instr->InstructionBits() == 0x0) {
+            Format(instr, "nop");
+          } else {
+            Format(instr, "sll  'rd, 'rt, 'sa");
+          }
+          break;
+        case SRL:
+          Format(instr, "srl  'rd, 'rt, 'sa");
+          break;
+        case SRA:
+          Format(instr, "sra  'rd, 'rt, 'sa");
+          break;
+        case SLLV:
+          Format(instr, "sllv 'rd, 'rt, 'rs");
+          break;
+        case SRLV:
+          Format(instr, "srlv 'rd, 'rt, 'rs");
+          break;
+        case SRAV:
+          Format(instr, "srav 'rd, 'rt, 'rs");
+          break;
+        case MFHI:
+          Format(instr, "mfhi 'rd");
+          break;
+        case MFLO:
+          Format(instr, "mflo 'rd");
+          break;
+        case MULT:
+          Format(instr, "mult 'rs, 'rt");
+          break;
+        case MULTU:
+          Format(instr, "multu  'rs, 'rt");
+          break;
+        case DIV:
+          Format(instr, "div  'rs, 'rt");
+          break;
+        case DIVU:
+          Format(instr, "divu 'rs, 'rt");
+          break;
+        case ADD:
+          Format(instr, "add  'rd, 'rs, 'rt");
+          break;
+        case ADDU:
+          Format(instr, "addu 'rd, 'rs, 'rt");
+          break;
+        case SUB:
+          Format(instr, "sub  'rd, 'rs, 'rt");
+          break;
+        case SUBU:
+          Format(instr, "subu 'rd, 'rs, 'rt");
+          break;
+        case AND:
+          Format(instr, "and  'rd, 'rs, 'rt");
+          break;
+        case OR:
+          if (0 == instr->RsField()) {
+            Format(instr, "mov  'rd, 'rt");
+          } else if (0 == instr->RtField()) {
+            Format(instr, "mov  'rd, 'rs");
+          } else {
+            Format(instr, "or   'rd, 'rs, 'rt");
+          }
+          break;
+        case XOR:
+          Format(instr, "xor  'rd, 'rs, 'rt");
+          break;
+        case NOR:
+          Format(instr, "nor  'rd, 'rs, 'rt");
+          break;
+        case SLT:
+          Format(instr, "slt  'rd, 'rs, 'rt");
+          break;
+        case SLTU:
+          Format(instr, "sltu 'rd, 'rs, 'rt");
+          break;
+        case BREAK:
+          Format(instr, "break, code: 'code");
+          break;
+        case TGE:
+          Format(instr, "tge  'rs, 'rt, code: 'code");
+          break;
+        case TGEU:
+          Format(instr, "tgeu 'rs, 'rt, code: 'code");
+          break;
+        case TLT:
+          Format(instr, "tlt  'rs, 'rt, code: 'code");
+          break;
+        case TLTU:
+          Format(instr, "tltu 'rs, 'rt, code: 'code");
+          break;
+        case TEQ:
+          Format(instr, "teq  'rs, 'rt, code: 'code");
+          break;
+        case TNE:
+          Format(instr, "tne  'rs, 'rt, code: 'code");
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          Format(instr, "mul  'rd, 'rs, 'rt");
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    default:
+      UNREACHABLE();
+  };
+}
+
+
+void Decoder::DecodeTypeImmediate(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    // ------------- REGIMM class.
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          Format(instr, "bltz 'rs, 'imm16u");
+          break;
+        case BLTZAL:
+          Format(instr, "bltzal 'rs, 'imm16u");
+          break;
+        case BGEZ:
+          Format(instr, "bgez 'rs, 'imm16u");
+          break;
+        case BGEZAL:
+          Format(instr, "bgezal 'rs, 'imm16u");
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;  // Case REGIMM.
+    // ------------- Branch instructions.
+    case BEQ:
+      Format(instr, "beq  'rs, 'rt, 'imm16u");
+      break;
+    case BNE:
+      Format(instr, "bne  'rs, 'rt, 'imm16u");
+      break;
+    case BLEZ:
+      Format(instr, "blez 'rs, 'imm16u");
+      break;
+    case BGTZ:
+      Format(instr, "bgtz 'rs, 'imm16u");
+      break;
+    // ------------- Arithmetic instructions.
+    case ADDI:
+      Format(instr, "addi   'rt, 'rs, 'imm16s");
+      break;
+    case ADDIU:
+      Format(instr, "addiu  'rt, 'rs, 'imm16s");
+      break;
+    case SLTI:
+      Format(instr, "slti   'rt, 'rs, 'imm16s");
+      break;
+    case SLTIU:
+      Format(instr, "sltiu  'rt, 'rs, 'imm16u");
+      break;
+    case ANDI:
+      Format(instr, "andi   'rt, 'rs, 'imm16x");
+      break;
+    case ORI:
+      Format(instr, "ori    'rt, 'rs, 'imm16x");
+      break;
+    case XORI:
+      Format(instr, "xori   'rt, 'rs, 'imm16x");
+      break;
+    case LUI:
+      Format(instr, "lui    'rt, 'imm16x");
+      break;
+    // ------------- Memory instructions.
+    case LB:
+      Format(instr, "lb     'rt, 'imm16s('rs)");
+      break;
+    case LW:
+      Format(instr, "lw     'rt, 'imm16s('rs)");
+      break;
+    case LBU:
+      Format(instr, "lbu    'rt, 'imm16s('rs)");
+      break;
+    case SB:
+      Format(instr, "sb     'rt, 'imm16s('rs)");
+      break;
+    case SW:
+      Format(instr, "sw     'rt, 'imm16s('rs)");
+      break;
+    case LWC1:
+      Format(instr, "lwc1   'ft, 'imm16s('rs)");
+      break;
+    case LDC1:
+      Format(instr, "ldc1   'ft, 'imm16s('rs)");
+      break;
+    case SWC1:
+      Format(instr, "swc1   'ft, 'imm16s('rs)");
+      break;
+    case SDC1:
+      Format(instr, "sdc1   'ft, 'imm16s('rs)");
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  };
+}
+
+
+void Decoder::DecodeTypeJump(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case J:
+      Format(instr, "j    'imm26");
+      break;
+    case JAL:
+      Format(instr, "jal  'imm26");
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte_* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  // Print raw instruction bytes.
+  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%08x       ",
+                                       instr->InstructionBits());
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType: {
+      DecodeTypeRegister(instr);
+      break;
+    }
+    case Instruction::kImmediateType: {
+      DecodeTypeImmediate(instr);
+      break;
+    }
+    case Instruction::kJumpType: {
+      DecodeTypeJump(instr);
+      break;
+    }
+    default: {
+      UNSUPPORTED_MIPS();
+    }
+  }
+  return Instruction::kInstructionSize;
+}
+
+
+} }  // namespace assembler::mips
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+namespace v8i = v8::internal;
+
+
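+// Note: the returned string lives in a static buffer, so it is only valid
+// until the next call to NameOfAddress().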
+const char* NameConverter::NameOfAddress(byte_* addr) const {
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte_* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return assembler::mips::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  return assembler::mips::FPURegister::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // MIPS does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
+const char* NameConverter::NameInCode(byte_* addr) const {
+  // The default name converter is called for unknown code, so we do not try
+  // to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte_* instruction) {
+  assembler::mips::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+
+int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+  UNIMPLEMENTED_MIPS();
+  return -1;
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte_* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte_* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    fprintf(f, "%p    %08x      %s\n",
+            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
+
+
+}  // namespace disasm
+
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/fast-codegen-mips.cc
new file mode 100644
index 0000000..c47f632
--- /dev/null
+++ b/src/mips/fast-codegen-mips.cc
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FastCodeGenerator::Generate(CompilationInfo* info) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
new file mode 100644
index 0000000..d2c717c
--- /dev/null
+++ b/src/mips/frames-mips.cc
@@ -0,0 +1,100 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "mips/assembler-mips-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  // Compute frame type and stack pointer.
+  Address sp = fp + ExitFrameConstants::kSPDisplacement;
+  const int offset = ExitFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp + offset);
+  bool is_debug_exit = code->IsSmi();
+  if (is_debug_exit) {
+    sp -= kNumJSCallerSaved * kPointerSize;
+  }
+  // Fill in the state.
+  state->sp = sp;
+  state->fp = fp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+  return EXIT;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // Do nothing
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED_MIPS();
+  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
new file mode 100644
index 0000000..ec1949d
--- /dev/null
+++ b/src/mips/frames-mips.h
@@ -0,0 +1,164 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#ifndef V8_MIPS_FRAMES_MIPS_H_
+#define V8_MIPS_FRAMES_MIPS_H_
+
+
+namespace v8 {
+namespace internal {
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+static const int kNumRegs = 32;
+
+static const RegList kJSCallerSaved =
+  1 << 4 |  // a0
+  1 << 5 |  // a1
+  1 << 6 |  // a2
+  1 << 7;   // a3
+
+static const int kNumJSCallerSaved = 4;
+
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns a0.code() == 4.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+static const RegList kCalleeSaved =
+  // Saved temporaries.
+  1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
+  1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
+  // gp, sp, fp
+  1 << 28 | 1 << 29 | 1 << 30;
+
+static const int kNumCalleeSaved = 11;
+
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+
+// ----------------------------------------------------
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kStateOffset = 1 * kPointerSize;
+  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
+
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -3 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  // Exit frames have a debug marker on the stack.
+  static const int kSPDisplacement = -1 * kPointerSize;
+
+  // The debug marker is just above the frame pointer.
+  static const int kDebugMarkOffset = -1 * kPointerSize;
+  // Alias for kDebugMarkOffset; the two must remain equal.
+  static const int kCodeOffset = -1 * kPointerSize;
+
+  static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = +0 * kPointerSize;
+  // The calling JS function is between FP and PC.
+  static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.
+  static const int kCallerSPDisplacement = +4 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+
+  // Size of the four 32-bit MIPS argument slots.
+  static const int kRegularArgsSlotsSize = 4 * kPointerSize;
+  // Shorter alias for the same constant; prefer it in new code.
+  static const int kRArgsSlotsSize = kRegularArgsSlotsSize;
+
+  // C/C++ argument slots size.
+  static const int kCArgsSlotsSize = 4 * kPointerSize;
+  // JS argument slots size.
+  static const int kJSArgsSlotsSize = 0 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_FRAMES_MIPS_H_
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
new file mode 100644
index 0000000..920329e
--- /dev/null
+++ b/src/mips/full-codegen-mips.cc
@@ -0,0 +1,268 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+                                     Expression::Context context,
+                                     Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+                              Label* materialize_true,
+                              Label* materialize_false) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+  UNIMPLEMENTED_MIPS();
+  return MemOperand(zero_reg, 0);   // UNIMPLEMENTED RETURN
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+                             Register src,
+                             Register scratch1,
+                             Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+                                     Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Expression::Context context) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+                                       Handle<Object> ignored,
+                                       RelocInfo::Mode mode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Register FullCodeGenerator::result_register() { return v0; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
new file mode 100644
index 0000000..5598cdf
--- /dev/null
+++ b/src/mips/ic-mips.cc
@@ -0,0 +1,187 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
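+// The inlined fast-path versions of these ICs are not implemented on MIPS
+// yet, so the patching hooks below are no-ops that report failure.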
+void LoadIC::ClearInlinedVersion(Address address) {}
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  return false;
+}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return false;
+}
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return false;
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+                            const ExternalReference& f) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
new file mode 100644
index 0000000..3301d19
--- /dev/null
+++ b/src/mips/jump-target-mips.cc
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::Call() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void JumpTarget::DoBind() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Jump(Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakTarget::Bind(Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
new file mode 100644
index 0000000..b733bdd
--- /dev/null
+++ b/src/mips/macro-assembler-mips.cc
@@ -0,0 +1,895 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
+}
+
+
+void MacroAssembler::Jump(Register target, Condition cond,
+                          Register r1, const Operand& r2) {
+  Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  Jump(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(Register target,
+                          Condition cond, Register r1, const Operand& r2) {
+  Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  Call(Operand(target), cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond, Register r1, const Operand& r2) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
+  Jump(Operand(ra), cond, r1, r2);
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index) {
+  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index,
+                              Condition cond,
+                              Register src1, const Operand& src2) {
+  Branch(NegateCondition(cond), 2, src1, src2);
+  nop();
+  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+}
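+// s4 is used here as the dedicated roots-array pointer, so a root load is a
+// single lw off that register. In the conditional variant, the negated
+// branch with offset 2 skips the nop and the lw when cond does not hold.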
+
+
+void MacroAssembler::RecordWrite(Register object, Register offset,
+                                 Register scratch) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Instruction macros
+
+void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    add(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      addi(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      add(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    addu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      addiu(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      addu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mul(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    mul(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mult(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    mult(rs, at);
+  }
+}
+
+
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    multu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    multu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Div(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    div(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    div(rs, at);
+  }
+}
+
+
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    divu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    divu(rs, at);
+  }
+}
+
+
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    and_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      andi(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      and_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    or_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      ori(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      or_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    xor_(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      xori(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      xor_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    nor(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    ASSERT(!rs.is(at));
+    li(at, rt);
+    nor(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    slt(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      slti(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      slt(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+      sltiu(rd, rs, rt.imm32_);
+    } else {
+      // li handles the relocation.
+      ASSERT(!rs.is(at));
+      li(at, rt);
+      sltu(rd, rs, at);
+    }
+  }
+}
+
+
+//------------Pseudo-instructions-------------
+
+void MacroAssembler::movn(Register rd, Register rt) {
+  addiu(at, zero_reg, -1);  // Fill at with ones.
+  xor_(rd, rt, at);
+}
+
+
+// Load a 32-bit immediate word into a register.
+void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+  ASSERT(!j.is_reg());
+
+  if (!MustUseAt(j.rmode_) && !gen2instr) {
+    // Normal load of an immediate value which does not need Relocation Info.
+    if (is_int16(j.imm32_)) {
+      addiu(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & HIMask)) {
+      ori(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & LOMask)) {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+    } else {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+      ori(rd, rd, (LOMask & j.imm32_));
+    }
+  } else if (MustUseAt(j.rmode_) || gen2instr) {
+    if (MustUseAt(j.rmode_)) {
+      RecordRelocInfo(j.rmode_, j.imm32_);
+    }
+    // We always need the same number of instructions here, because this
+    // code may later be patched to load a different value that takes two
+    // instructions to load.
+    if (is_int16(j.imm32_)) {
+      nop();
+      addiu(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & HIMask)) {
+      nop();
+      ori(rd, zero_reg, j.imm32_);
+    } else if (!(j.imm32_ & LOMask)) {
+      nop();
+      lui(rd, (HIMask & j.imm32_) >> 16);
+    } else {
+      lui(rd, (HIMask & j.imm32_) >> 16);
+      ori(rd, rd, (LOMask & j.imm32_));
+    }
+  }
+}
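+// For illustration, with no reloc info li(t0, 0x12345678) expands to the
+// classic two-instruction sequence:
+//   lui t0, 0x1234
+//   ori t0, t0, 0x5678
+// while li(t0, 0x12, true) is padded to the same length with a leading nop:
+//   nop
+//   addiu t0, zero_reg, 0x12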
+
+
+// Exception-generating instructions and debugging support
+void MacroAssembler::stop(const char* msg) {
+  // TO_UPGRADE: Just a break for now; this could be upgraded to make msg
+  // available to the debugger.
+  // We use the 0x54321 value to be able to find it easily when reading memory.
+  break_(0x54321);
+}
+
+
+void MacroAssembler::MultiPush(RegList regs) {
+  int16_t NumSaved = 0;
+  int16_t NumToPush = NumberOfBitsSet(regs);
+
+  addiu(sp, sp, -4 * NumToPush);
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPushReversed(RegList regs) {
+  int16_t NumSaved = 0;
+  int16_t NumToPush = NumberOfBitsSet(regs);
+
+  addiu(sp, sp, -4 * NumToPush);
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+  int16_t NumSaved = 0;
+
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+    }
+  }
+  addiu(sp, sp, 4 * NumSaved);
+}
+
+
+void MacroAssembler::MultiPopReversed(RegList regs) {
+  int16_t NumSaved = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
+    }
+  }
+  addiu(sp, sp, 4 * NumSaved);
+}
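+// Usage sketch (illustrative; assumes Register::bit() as in the other
+// ports): preserving two temporaries around clobbering code:
+//   MultiPush(t0.bit() | t1.bit());
+//   ... code that clobbers t0 and t1 ...
+//   MultiPop(t0.bit() | t1.bit());
+// The same register list must be passed to the matching pop.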
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+
+// Trashes the at register if no scratch register is provided.
+void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
+                            const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs));
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      b(offset);
+      break;
+    case eq:
+      beq(rs, r2, offset);
+      break;
+    case ne:
+      bne(rs, r2, offset);
+      break;
+
+      // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      bne(scratch, zero_reg, offset);
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      beq(scratch, zero_reg, offset);
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      bne(scratch, zero_reg, offset);
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      beq(scratch, zero_reg, offset);
+      break;
+
+      // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      bne(scratch, zero_reg, offset);
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      beq(scratch, zero_reg, offset);
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      bne(scratch, zero_reg, offset);
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      beq(scratch, zero_reg, offset);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
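+// For example, Branch(greater, offset, a0, Operand(a1)) emits the
+// two-instruction emulation:
+//   slt at, a1, a0            // at = (a1 < a0) ? 1 : 0.
+//   bne at, zero_reg, offset
+// and, per the note above, the caller must still fill the delay slot.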
+
+
+void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
+                            const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  // We use shifted_branch_offset as an argument to the branch instructions
+  // so that it is computed just before the branch instruction is generated,
+  // as required.
+
+  switch (cond) {
+    case cc_always:
+      b(shifted_branch_offset(L, false));
+      break;
+    case eq:
+      beq(rs, r2, shifted_branch_offset(L, false));
+      break;
+    case ne:
+      bne(rs, r2, shifted_branch_offset(L, false));
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      bne(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      beq(scratch, zero_reg, shifted_branch_offset(L, false));
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Trashes the at register if no scratch register is provided.
+// We need to use bgezal or bltzal, but they cannot consume the 0/1 result of
+// the slt instructions directly. We could compare with sub or add instead,
+// but we would miss overflow cases, so we keep slt and add an intermediate
+// third instruction.
+void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
+                                   const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      bal(offset);
+      break;
+    case eq:
+      bne(rs, r2, 2);
+      nop();
+      bal(offset);
+      break;
+    case ne:
+      beq(rs, r2, 2);
+      nop();
+      bal(offset);
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, offset);
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, offset);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
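+// For example, BranchAndLink(less, offset, a0, Operand(a1)) emits:
+//   slt   at, a0, a1          // at = (a0 < a1) ? 1 : 0.
+//   addiu at, at, -1          // at = 0 if the branch is taken, -1 if not.
+//   bgezal at, offset         // Link and branch when at >= 0.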
+
+
+void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
+                                   const Operand& rt, Register scratch) {
+  Register r2;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  switch (cond) {
+    case cc_always:
+      bal(shifted_branch_offset(L, false));
+      break;
+    case eq:
+      bne(rs, r2, 2);
+      nop();
+      bal(shifted_branch_offset(L, false));
+      break;
+    case ne:
+      beq(rs, r2, 2);
+      nop();
+      bal(shifted_branch_offset(L, false));
+      break;
+
+    // Signed comparison
+    case greater:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case greater_equal:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+    case less:
+      slt(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case less_equal:
+      slt(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+
+    // Unsigned comparison.
+    case Ugreater:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Ugreater_equal:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Uless:
+      sltu(scratch, rs, r2);
+      addiu(scratch, scratch, -1);
+      bgezal(scratch, shifted_branch_offset(L, false));
+      break;
+    case Uless_equal:
+      sltu(scratch, r2, rs);
+      addiu(scratch, scratch, -1);
+      bltzal(scratch, shifted_branch_offset(L, false));
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void MacroAssembler::Jump(const Operand& target,
+                          Condition cond, Register rs, const Operand& rt) {
+  if (target.is_reg()) {
+    if (cond == cc_always) {
+      jr(target.rm());
+    } else {
+      Branch(NegateCondition(cond), 2, rs, rt);
+      nop();
+      jr(target.rm());
+    }
+  } else {    // !target.is_reg()
+    if (!MustUseAt(target.rmode_)) {
+      if (cond == cc_always) {
+        j(target.imm32_);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        j(target.imm32_);  // will generate only one instruction.
+      }
+    } else {  // MustUseAt(target)
+      li(at, target);  // Load the actual jump target, not the rt operand.
+      if (cond == cc_always) {
+        jr(at);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jr(at);  // will generate only one instruction.
+      }
+    }
+  }
+}
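+// In the conditional cases above, the negated branch with offset 2 skips the
+// nop and the single jump instruction, so the jump is taken only when cond
+// holds. Call below uses the same scheme.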
+
+
+void MacroAssembler::Call(const Operand& target,
+                          Condition cond, Register rs, const Operand& rt) {
+  if (target.is_reg()) {
+    if (cond == cc_always) {
+      jalr(target.rm());
+    } else {
+      Branch(NegateCondition(cond), 2, rs, rt);
+      nop();
+      jalr(target.rm());
+    }
+  } else {    // !target.is_reg()
+    if (!MustUseAt(target.rmode_)) {
+      if (cond == cc_always) {
+        jal(target.imm32_);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jal(target.imm32_);  // will generate only one instruction.
+      }
+    } else {  // MustUseAt(target)
+      li(at, target);  // Load the actual call target, not the rt operand.
+      if (cond == cc_always) {
+        jalr(at);
+      } else {
+        Branch(NegateCondition(cond), 2, rs, rt);
+        nop();
+        jalr(at);  // will generate only one instruction.
+      }
+    }
+  }
+}
+
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Drop(int count, Condition cond) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// ---------------------------------------------------------------------------
+// Exception handling
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::PopTryHandler() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+
+// ---------------------------------------------------------------------------
+// Activation frames
+
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
+                              Register r1, const Operand& r2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+                                     int num_arguments,
+                                     int result_size) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+                                            bool* resolved) {
+  UNIMPLEMENTED_MIPS();
+  return Handle<Code>(reinterpret_cast<Code*>(NULL));   // UNIMPLEMENTED RETURN
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeJSFlags flags) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg,
+                            Register rs, Operand rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg,
+                           Register rs, Operand rt) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
new file mode 100644
index 0000000..aea9836
--- /dev/null
+++ b/src/mips/macro-assembler-mips.h
@@ -0,0 +1,381 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+// The at register is used by the macro assembler when generating instruction
+// sequences, so it is not safe to use unless you know exactly what you are
+// doing.
+
+// Register aliases.
+const Register cp = s7;     // JavaScript context pointer
+const Register fp = s8_fp;  // Alias fp
+
+enum InvokeJSFlags {
+  CALL_JS,
+  JUMP_JS
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+  void Jump(const Operand& target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(const Operand& target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(Register target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(byte* target, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(Register target,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(byte* target, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(Handle<Code> code, RelocInfo::Mode rmode,
+            Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Ret(Condition cond = cc_always,
+           Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
+              const Operand& rt = Operand(zero_reg), Register scratch = at);
+  void Branch(Condition cond, Label* L, Register rs = zero_reg,
+              const Operand& rt = Operand(zero_reg), Register scratch = at);
+  // Conditional branch and link.
+  void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
+                     const Operand& rt = Operand(zero_reg),
+                     Register scratch = at);
+  void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
+                     const Operand& rt = Operand(zero_reg),
+                     Register scratch = at);
+
+  // Emit code to discard a non-negative number of pointer-sized elements
+  // from the stack, clobbering only the sp register.
+  void Drop(int count, Condition cond = cc_always);
+
+  void Call(Label* target);
+
+  // Jump unconditionally to the given label.
+  // We NEED a nop in the branch delay slot, as it is relied on by V8, for
+  // example in CodeGenerator::ProcessDeferred().
+  // Prefer b(Label) when generating code.
+  void jmp(Label* L) {
+    Branch(cc_always, L);
+    nop();
+  }
+
+  // Load an object from the root table.
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index);
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index,
+                Condition cond, Register src1, const Operand& src2);
+
+  // Sets the remembered set bit for [address+offset], where address is the
+  // address of the heap object 'object'.  The address must be in the first 8K
+  // of an allocated page. The 'scratch' register is used in the
+  // implementation, and all 3 registers are clobbered by the operation, as
+  // well as the at register.
+  void RecordWrite(Register object, Register offset, Register scratch);
+
+
+  // ---------------------------------------------------------------------------
+  // Instruction macros
+
+#define DEFINE_INSTRUCTION(instr)                                       \
+  void instr(Register rd, Register rs, const Operand& rt);                     \
+  void instr(Register rd, Register rs, Register rt) {                          \
+    instr(rd, rs, Operand(rt));                                                \
+  }                                                                            \
+  void instr(Register rs, Register rt, int32_t j) {                            \
+    instr(rs, rt, Operand(j));                                                 \
+  }
+
+#define DEFINE_INSTRUCTION2(instr)                                      \
+  void instr(Register rs, const Operand& rt);                                  \
+  void instr(Register rs, Register rt) {                                       \
+    instr(rs, Operand(rt));                                                    \
+  }                                                                            \
+  void instr(Register rs, int32_t j) {                                         \
+    instr(rs, Operand(j));                                                     \
+  }
+
+  DEFINE_INSTRUCTION(Add);
+  DEFINE_INSTRUCTION(Addu);
+  DEFINE_INSTRUCTION(Mul);
+  DEFINE_INSTRUCTION2(Mult);
+  DEFINE_INSTRUCTION2(Multu);
+  DEFINE_INSTRUCTION2(Div);
+  DEFINE_INSTRUCTION2(Divu);
+
+  DEFINE_INSTRUCTION(And);
+  DEFINE_INSTRUCTION(Or);
+  DEFINE_INSTRUCTION(Xor);
+  DEFINE_INSTRUCTION(Nor);
+
+  DEFINE_INSTRUCTION(Slt);
+  DEFINE_INSTRUCTION(Sltu);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
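+
+  // For illustration, DEFINE_INSTRUCTION(Add) above declares three
+  // overloads, so callers may write any of:
+  //   Add(v0, a0, Operand(a1));
+  //   Add(v0, a0, a1);   // Forwards to the Operand form.
+  //   Add(v0, a0, 8);    // Wraps the immediate in an Operand.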
+
+
+  //------------Pseudo-instructions-------------
+
+  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+  // Move the logical ones complement of source to dest.
+  void movn(Register rd, Register rt);
+
+
+  // Load an int32 into the rd register.
+  void li(Register rd, Operand j, bool gen2instr = false);
+  inline void li(Register rd, int32_t j, bool gen2instr = false) {
+    li(rd, Operand(j), gen2instr);
+  }
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg);
+
+
+  // Push multiple registers on the stack.
+  // With MultiPush, lower registers are pushed first on the stack.
+  // For example if you push t0, t1, s0, and ra you get:
+  // |                       |
+  // |-----------------------|
+  // |         t0            |                     +
+  // |-----------------------|                    |
+  // |         t1            |                    |
+  // |-----------------------|                    |
+  // |         s0            |                    v
+  // |-----------------------|                     -
+  // |         ra            |
+  // |-----------------------|
+  // |                       |
+  void MultiPush(RegList regs);
+  void MultiPushReversed(RegList regs);
+  void Push(Register src) {
+    Addu(sp, sp, Operand(-kPointerSize));
+    sw(src, MemOperand(sp, 0));
+  }
+  inline void push(Register src) { Push(src); }
+
+  void Push(Register src, Condition cond, Register tst1, Register tst2) {
+    // Since we don't have conditional execution, we branch over the push
+    // with the negated condition.
+    Branch(NegateCondition(cond), 3, tst1, Operand(tst2));
+    nop();
+    Addu(sp, sp, Operand(-kPointerSize));
+    sw(src, MemOperand(sp, 0));
+  }
+
+  // Pops multiple values from the stack and loads them into the registers
+  // specified in regs. Pop order is the opposite of the order in MultiPush.
+  void MultiPop(RegList regs);
+  void MultiPopReversed(RegList regs);
+  void Pop(Register dst) {
+    lw(dst, MemOperand(sp, 0));
+    Addu(sp, sp, Operand(kPointerSize));
+  }
+  void Pop() {
+    Addu(sp, sp, Operand(kPointerSize));
+  }
+
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link it into the try handler chain.
+  // The return address must be passed in register ra.
+  // On exit, v0 contains the TOS (code slot).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();
+
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  inline void BranchOnSmi(Register value, Label* smi_label,
+                          Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(eq, smi_label, scratch, Operand(zero_reg));
+  }
+
+
+  inline void BranchOnNotSmi(Register value, Label* not_smi_label,
+                             Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(ne, not_smi_label, scratch, Operand(zero_reg));
+  }
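+  // Since kSmiTag is 0, a single andi against kSmiTagMask isolates the low
+  // tag bit: it is clear for smis and set for heap object pointers.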
+
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub, Condition cond = cc_always,
+                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void CallJSExitStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToRuntime, but also takes care of passing the number
+  // of parameters.
+  void TailCallRuntime(const ExternalReference& ext,
+                       int num_arguments,
+                       int result_size);
+
+  // Jump to the builtin routine.
+  void JumpToRuntime(const ExternalReference& builtin);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+
+  // Store the code object for the given builtin in the target register and
+  // set up the function in a1.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // Stack limit support
+
+  void StackLimitCheck(Label* on_stack_limit_hit);
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value,
+                  Register scratch1, Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg, Register rs, Operand rt);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Get the code for the given builtin. Whether the function could be
+  // resolved is returned through the 'resolved' flag.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
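+// For example (illustrative), loading an object's map through this helper:
+//   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+// The -kHeapObjectTag compensates for the tag bit set on heap object
+// pointers.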
+
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
new file mode 100644
index 0000000..a876bee
--- /dev/null
+++ b/src/mips/register-allocator-mips-inl.h
@@ -0,0 +1,137 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
+#include "v8.h"
+#include "mips/assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+  // The code for this test relies on the order of register codes.
+  return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
+}
+
+
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,    // zero_reg
+    1,    // at
+    2,    // v0
+    3,    // v1
+    4,    // a0
+    5,    // a1
+    6,    // a2
+    7,    // a3
+    8,    // t0
+    9,    // t1
+    10,   // t2
+    11,   // t3
+    12,   // t4
+    13,   // t5
+    14,   // t6
+    15,   // t7
+    16,   // s0
+    17,   // s1
+    18,   // s2
+    19,   // s3
+    20,   // s4
+    21,   // s5
+    22,   // s6
+    23,   // s7
+    24,   // t8
+    25,   // t9
+    26,   // k0
+    27,   // k1
+    28,   // gp
+    29,   // sp
+    30,   // s8_fp
+    31,   // ra
+  };
+  return kNumbers[reg.code()];
+}
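+// Note that the allocator numbers above coincide with the hardware register
+// codes, so ToNumber is the identity on valid codes and ToRegister below is
+// simply its inverse.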
+
+
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {
+    zero_reg,
+    at,
+    v0,
+    v1,
+    a0,
+    a1,
+    a2,
+    a3,
+    t0,
+    t1,
+    t2,
+    t3,
+    t4,
+    t5,
+    t6,
+    t7,
+    s0,
+    s1,
+    s2,
+    s3,
+    s4,
+    s5,
+    s6,
+    s7,
+    t8,
+    t9,
+    k0,
+    k1,
+    gp,
+    sp,
+    s8_fp,
+    ra
+  };
+  return kRegisters[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The non-reserved a1 and ra registers are live on JS function entry.
+  Use(a1);  // JS function.
+  Use(ra);  // Return address.
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
+
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
new file mode 100644
index 0000000..f48d3a6
--- /dev/null
+++ b/src/mips/register-allocator-mips.cc
@@ -0,0 +1,60 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void Result::ToRegister(Register target) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  // No byte registers on MIPS.
+  UNREACHABLE();
+  return Result();
+}
+
+
+} }  // namespace v8::internal
+
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
new file mode 100644
index 0000000..e056fb8
--- /dev/null
+++ b/src/mips/register-allocator-mips.h
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
+#include "mips/constants-mips.h"
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+  static const int kNumRegisters = assembler::mips::kNumRegisters;
+  static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
new file mode 100644
index 0000000..2e2dc86
--- /dev/null
+++ b/src/mips/simulator-mips.cc
@@ -0,0 +1,1648 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cstdarg>
+#include "v8.h"
+
+#include "disasm.h"
+#include "assembler.h"
+#include "globals.h"    // Need the bit_cast
+#include "mips/constants-mips.h"
+#include "mips/simulator-mips.h"
+
+namespace v8i = v8::internal;
+
+#if !defined(__mips)
+
+// Only build the simulator if not compiling for real MIPS hardware.
+namespace assembler {
+namespace mips {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8::internal::OS;
+using ::v8::internal::ReadLine;
+using ::v8::internal::DeleteArray;
+
+// Utility functions.
+bool HaveSameSign(int32_t a, int32_t b) {
+  // Two integers have the same sign exactly when their xor is non-negative.
+  return ((a ^ b) >= 0);
+}
+
+
+// This macro provides a platform independent use of sscanf. The reason
+// SScanF is not implemented in a platform independent way through
+// ::v8::internal::OS, as SNPrintF is, is that the Windows C Run-Time
+// Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The Debugger class is used by the simulator while debugging simulated MIPS
+// code.
+class Debugger {
+ public:
+  explicit Debugger(Simulator* sim);
+  ~Debugger();
+
+  void Stop(Instruction* instr);
+  void Debug();
+
+ private:
+  // We set the breakpoint code to 0xfffff to easily recognize it.
+  static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
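+  // (SPECIAL | BREAK selects the break instruction, and the 20-bit break
+  // code occupies bits 6..25, so this encodes "break 0xfffff".)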
+  static const Instr kNopInstr =  0x0;
+
+  Simulator* sim_;
+
+  int32_t GetRegisterValue(int regnum);
+  bool GetValue(const char* desc, int32_t* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instruction* breakpc);
+  bool DeleteBreakpoint(Instruction* breakpc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+
+  // Print all registers with a nice formatting.
+  void PrintAllRegs();
+};
+
+Debugger::Debugger(Simulator* sim) {
+  sim_ = sim;
+}
+
+Debugger::~Debugger() {
+}
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "a+");
+  }
+}
+
+
+void Debugger::Stop(Instruction* instr) {
+  UNIMPLEMENTED_MIPS();
+  char* str = reinterpret_cast<char*>(instr->InstructionBits());
+  if (strlen(str) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", str);
+      fflush(coverage_log);
+    }
+    instr->SetInstructionBits(0x0);  // Overwrite with nop.
+  }
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+}
+
+#else  // ndef GENERATED_CODE_COVERAGE
+
+#define UNSUPPORTED() printf("Unsupported instruction.\n");
+
+static void InitializeCoverage() {}
+
+
+void Debugger::Stop(Instruction* instr) {
+  const char* str = reinterpret_cast<char*>(instr->InstructionBits());
+  PrintF("Simulator hit %s\n", str);
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+  Debug();
+}
+#endif  // def GENERATED_CODE_COVERAGE
+
+
+int32_t Debugger::GetRegisterValue(int regnum) {
+  if (regnum == kNumSimuRegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_register(regnum);
+  }
+}
+
+
+bool Debugger::GetValue(const char* desc, int32_t* value) {
+  int regnum = Registers::Number(desc);
+  if (regnum != kInvalidRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else {
+    return SScanF(desc, "%i", value) == 1;
+  }
+}
+
+
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
+  // Check if a breakpoint can be set. If not return without any side-effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = breakpc;
+  sim_->break_instr_ = breakpc->InstructionBits();
+  // Not setting the breakpoint instruction in the code itself. It will be set
+  // when the debugger shell continues.
+  return true;
+}
+
+
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+
+void Debugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+void Debugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+void Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+  PrintF("\n");
+  // at, v0, a0
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(1), REG_INFO(2), REG_INFO(4));
+  // v1, a1
+  PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         "", REG_INFO(3), REG_INFO(5));
+  // a2
+  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
+  // a3
+  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
+  PrintF("\n");
+  // t0-t7, s0-s7
+  for (int i = 0; i < 8; i++) {
+    PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+           REG_INFO(8+i), REG_INFO(16+i));
+  }
+  PrintF("\n");
+  // t8, k0, LO
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(24), REG_INFO(26), REG_INFO(32));
+  // t9, k1, HI
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(25), REG_INFO(27), REG_INFO(33));
+  // sp, fp, gp
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(29), REG_INFO(30), REG_INFO(28));
+  // pc
+  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
+         REG_INFO(31), REG_INFO(34));
+#undef REG_INFO
+}
+
+void Debugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+
+  // Make sure we have a proper terminating character if we reach the limit.
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+
+  while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // use a reasonably large buffer
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer,
+                             reinterpret_cast<byte_*>(sim_->get_pc()));
+      PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int args = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+          sim_->InstructionDecode(
+                                reinterpret_cast<Instruction*>(sim_->get_pc()));
+        } else {
+          // Allow si to jump over generated breakpoints.
+          PrintF("/!\\ Jumping over generated breakpoint.\n");
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+        }
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // Execute the one instruction we broke at with breakpoints disabled.
+        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (strcmp(arg1, "all") == 0) {
+            PrintAllRegs();
+          } else {
+            if (GetValue(arg1, &value)) {
+              PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF("print <register>\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0)
+                 || (strcmp(cmd, "printobject") == 0)) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            PrintF("%s: \n", arg1);
+#ifdef DEBUG
+            obj->PrintLn();
+#else
+            obj->ShortPrint();
+            PrintF("\n");
+#endif
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte_* cur = NULL;
+        byte_* end = NULL;
+
+        if (args == 1) {
+          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstructionSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte_*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instruction::kInstructionSize);
+          }
+        } else {
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte_*>(value1);
+            end = cur + (value2 * Instruction::kInstructionSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          cur += Instruction::kInstructionSize;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::internal::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("No flags on MIPS !\n");
+      } else if (strcmp(cmd, "unstop") == 0) {
+          PrintF("Unstop command not implemented on MIPS.");
+      } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+        // Print registers and disassemble
+        PrintAllRegs();
+        PrintF("\n");
+
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte_* cur = NULL;
+        byte_* end = NULL;
+
+        if (args == 1) {
+          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstructionSize);
+        } else if (args == 2) {
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte_*>(value);
+            // no length parameter passed, assume 10 instructions
+            end = cur + (10 * Instruction::kInstructionSize);
+          }
+        } else {
+          int32_t value1;
+          int32_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte_*>(value1);
+            end = cur + (value2 * Instruction::kInstructionSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          cur += Instruction::kInstructionSize;
+        }
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi\n");
+        PrintF("  step one instruction (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to print all registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("flags\n");
+        PrintF("  print flags\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [[<address>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("unstop\n");
+        PrintF("  ignore the stop instruction at the current location");
+        PrintF(" from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+    DeleteArray(line);
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+  if (initialized_) return;
+  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+  initialized_ = true;
+  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
+Simulator::Simulator() {
+  Initialize();
+  // Set up the simulator support first. Some of this information is needed to
+  // set up the architecture state.
+  size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+  // Set up the architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    registers_[i] = 0;
+  }
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe against potential stack underflows we
+  // leave some buffer below.
+  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+  // The ra and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  registers_[pc] = bad_ra;
+  registers_[ra] = bad_ra;
+  InitializeCoverage();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  Redirection(void* external_function, bool fp_return)
+      : external_function_(external_function),
+        swi_instruction_(rtCallRedirInstr),
+        fp_return_(fp_return),
+        next_(list_) {
+    list_ = this;
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  bool fp_return() { return fp_return_; }
+
+  static Redirection* Get(void* external_function, bool fp_return) {
+    Redirection* current;
+    for (current = list_; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+    return new Redirection(external_function, fp_return);
+  }
+
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  bool fp_return_;
+  Redirection* next_;
+  static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
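+// Illustrative invariant (not part of the build): for any Redirection* r,
+//   Redirection::FromSwiInstruction(
+//       reinterpret_cast<Instruction*>(r->address_of_swi_instruction())) == r
+// holds, because FromSwiInstruction subtracts the fixed offset of the
+// swi_instruction_ field from the instruction's address.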
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           bool fp_return) {
+  Redirection* redirection = Redirection::Get(external_function, fp_return);
+  return redirection->address_of_swi_instruction();
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current() {
+  Initialize();
+  Simulator* sim = reinterpret_cast<Simulator*>(
+      v8::internal::Thread::GetThreadLocal(simulator_key));
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread goes away.
+    sim = new Simulator();
+    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+  }
+  return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == pc) {
+    pc_modified_ = true;
+  }
+
+  // The zero register always holds 0.
+  registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_fpu_register(int fpureg, int32_t value) {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+  *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == 0) {
+    return 0;
+  } else {
+    return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+  }
+}
+
+int32_t Simulator::get_fpu_register(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return FPUregisters_[fpureg];
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+  return *v8i::bit_cast<double*, int32_t*>(
+      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+  pc_modified_ = true;
+  registers_[pc] = value;
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const {
+  return registers_[pc];
+}
+
+
+// The MIPS architecture cannot do unaligned reads and writes.  On some MIPS
+// platforms an interrupt is caused.  On others it does a funky rotation thing.
+// For now we simply disallow unaligned reads, but at some point we may want to
+// move to emulating the rotate behaviour.  Note that simulator runs have the
+// runtime system running directly on the host system and only generated code
+// is executed in the simulator.  Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
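+//
+// For example, with 4-byte pointers (so kPointerAlignmentMask == 3, as assumed
+// here), a word read at address 0x00001004 takes the aligned path below while
+// a word read at 0x00001006 prints an error and aborts.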
+
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
+  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
+  if ((addr & v8i::kPointerAlignmentMask) == 0) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+double Simulator::ReadD(int32_t addr, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  OS::Abort();
+}
+
+
+uint32_t Simulator::ReadBU(int32_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr & 0xff;
+}
+
+
+int32_t Simulator::ReadB(int32_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  // Return the byte sign-extended to 32 bits; masking the result with 0xff
+  // would discard the sign extension that LB requires.
+  return *ptr;
+}
+
+
+void Simulator::WriteB(int32_t addr, uint8_t value) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+
+void Simulator::WriteB(int32_t addr, int8_t value) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+         instr, format);
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair, which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the v1 result register contains a bogus
+// value, which is fine because it is caller-saved.
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+                                        int32_t arg1,
+                                        int32_t arg2,
+                                        int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(double fparg0,
+                                         double fparg1);
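+// Illustrative example: a 64-bit runtime result of 0x11223344aabbccdd comes
+// back with v0 = 0xaabbccdd (low word) and v1 = 0x11223344 (high word),
+// matching the lo_res/hi_res split in SoftwareInterrupt() below.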
+
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // We first check whether we hit a redirected call (call_rt_redirected).
+  if (instr->InstructionBits() == rtCallRedirInstr) {
+    Redirection* redirection = Redirection::FromSwiInstruction(instr);
+    int32_t arg0 = get_register(a0);
+    int32_t arg1 = get_register(a1);
+    int32_t arg2 = get_register(a2);
+    int32_t arg3 = get_register(a3);
+    // fp args are (not always) in f12 and f14.
+    // See MIPS conventions for more details.
+    double fparg0 = get_fpu_register_double(f12);
+    double fparg1 = get_fpu_register_double(f14);
+    // This is dodgy but it works because the C entry stubs are never moved.
+    // See comment in codegen-arm.cc and bug 1242173.
+    int32_t saved_ra = get_register(ra);
+    if (redirection->fp_return()) {
+      intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+      SimulatorRuntimeFPCall target =
+          reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p with args %f, %f\n",
+               FUNCTION_ADDR(target), fparg0, fparg1);
+      }
+      double result = target(fparg0, fparg1);
+      set_fpu_register_double(f0, result);
+    } else {
+      intptr_t external =
+          reinterpret_cast<int32_t>(redirection->external_function());
+      SimulatorRuntimeCall target =
+          reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF(
+            "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+            FUNCTION_ADDR(target),
+            arg0,
+            arg1,
+            arg2,
+            arg3);
+      }
+      int64_t result = target(arg0, arg1, arg2, arg3);
+      int32_t lo_res = static_cast<int32_t>(result);
+      int32_t hi_res = static_cast<int32_t>(result >> 32);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Returned %08x\n", lo_res);
+      }
+      set_register(v0, lo_res);
+      set_register(v1, hi_res);
+    }
+    set_register(ra, saved_ra);
+    set_pc(get_register(ra));
+  } else {
+    Debugger dbg(this);
+    dbg.Debug();
+  }
+}
+
+void Simulator::SignalExceptions() {
+  for (int i = 1; i < kNumExceptions; i++) {
+    if (exceptions[i] != 0) {
+      V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
+    }
+  }
+}
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+  // Instruction fields
+  Opcode   op     = instr->OpcodeFieldRaw();
+  int32_t  rs_reg = instr->RsField();
+  int32_t  rs     = get_register(rs_reg);
+  uint32_t rs_u   = static_cast<uint32_t>(rs);
+  int32_t  rt_reg = instr->RtField();
+  int32_t  rt     = get_register(rt_reg);
+  uint32_t rt_u   = static_cast<uint32_t>(rt);
+  int32_t  rd_reg = instr->RdField();
+  uint32_t sa     = instr->SaField();
+
+  int32_t fs_reg = instr->FsField();
+
+  // ALU output
+  // It should not be used as is. Instructions using it should always initialize
+  // it first.
+  int32_t alu_out = 0x12345678;
+  // Output or temporary for floating point.
+  double fp_out = 0.0;
+
+  // For break and trap instructions.
+  bool do_interrupt = false;
+
+  // For jr and jalr
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc
+  int32_t next_pc = 0;
+
+  // ---------- Configuration
+  switch (op) {
+    case COP1:    // Coprocessor instructions
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+          alu_out = get_fpu_register(fs_reg);
+          break;
+        case MFHC1:
+          fp_out = get_fpu_register_double(fs_reg);
+          alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
+          break;
+        case MTC1:
+        case MTHC1:
+          // Do the store in the execution step.
+          break;
+        case S:
+        case D:
+        case W:
+        case L:
+        case PS:
+          // Do everything in the execution step.
+          break;
+        default:
+          UNIMPLEMENTED_MIPS();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          next_pc = get_register(instr->RsField());
+          break;
+        case SLL:
+          alu_out = rt << sa;
+          break;
+        case SRL:
+          alu_out = rt_u >> sa;
+          break;
+        case SRA:
+          alu_out = rt >> sa;
+          break;
+        case SLLV:
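+          // Note: MIPS masks variable shift amounts to their low 5 bits; this
+          // sketch assumes the generated code keeps rs in [0, 31], since
+          // shifting by 32 or more is undefined behaviour in C++.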
+          alu_out = rt << rs;
+          break;
+        case SRLV:
+          alu_out = rt_u >> rs;
+          break;
+        case SRAV:
+          alu_out = rt >> rs;
+          break;
+        case MFHI:
+          alu_out = get_register(HI);
+          break;
+        case MFLO:
+          alu_out = get_register(LO);
+          break;
+        case MULT:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case MULTU:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case DIV:
+        case DIVU:
+          exceptions[kDivideByZero] = rt == 0;
+          break;
+        case ADD:
+          if (HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
+            }
+          }
+          alu_out = rs + rt;
+          break;
+        case ADDU:
+          alu_out = rs + rt;
+          break;
+        case SUB:
+          if (!HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
+            }
+          }
+          alu_out = rs - rt;
+          break;
+        case SUBU:
+          alu_out = rs - rt;
+          break;
+        case AND:
+          alu_out = rs & rt;
+          break;
+        case OR:
+          alu_out = rs | rt;
+          break;
+        case XOR:
+          alu_out = rs ^ rt;
+          break;
+        case NOR:
+          alu_out = ~(rs | rt);
+          break;
+        case SLT:
+          alu_out = rs < rt ? 1 : 0;
+          break;
+        case SLTU:
+          alu_out = rs_u < rt_u ? 1 : 0;
+          break;
+        // Break and trap instructions
+        case BREAK:
+          do_interrupt = true;
+          break;
+        case TGE:
+          do_interrupt = rs >= rt;
+          break;
+        case TGEU:
+          do_interrupt = rs_u >= rt_u;
+          break;
+        case TLT:
+          do_interrupt = rs < rt;
+          break;
+        case TLTU:
+          do_interrupt = rs_u < rt_u;
+          break;
+        case TEQ:
+          do_interrupt = rs == rt;
+          break;
+        case TNE:
+          do_interrupt = rs != rt;
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          alu_out = rs_u * rt_u;  // Only the lower 32 bits are kept.
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    default:
+      UNREACHABLE();
+  };
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution
+  switch (op) {
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNREACHABLE();
+          break;
+        case MFC1:
+        case MFHC1:
+          set_register(rt_reg, alu_out);
+          break;
+        case MTC1:
+          // We don't need to set the higher bits to 0, because MIPS ISA says
+          // they are in an unpredictable state after executing MTC1.
+          FPUregisters_[fs_reg] = registers_[rt_reg];
+          FPUregisters_[fs_reg+1] = Unpredictable;
+          break;
+        case MTHC1:
+          // Here we need to keep the lower bits unchanged.
+          FPUregisters_[fs_reg+1] = registers_[rt_reg];
+          break;
+        case S:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_S:
+            case CVT_W_S:
+            case CVT_L_S:
+            case CVT_PS_S:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case D:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_D:
+            case CVT_W_D:
+            case CVT_L_D:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_W:
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CVT_D_W:   // Convert word to double.
+              set_fpu_register(rd_reg, static_cast<double>(rs));
+              break;
+            default:
+              UNREACHABLE();
+          };
+          break;
+        case L:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_L:
+            case CVT_D_L:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case PS:
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc + Instruction::kInstructionSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        case JALR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc + Instruction::kInstructionSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        // Instructions using HI and LO registers.
+        case MULT:
+        case MULTU:
+          break;
+        case DIV:
+          // Divide by zero was checked in the configuration step.
+          set_register(LO, rs / rt);
+          set_register(HI, rs % rt);
+          break;
+        case DIVU:
+          set_register(LO, rs_u / rt_u);
+          set_register(HI, rs_u % rt_u);
+          break;
+        // Break and trap instructions
+        case BREAK:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+          if (do_interrupt) {
+            SoftwareInterrupt(instr);
+          }
+          break;
+        default:  // For other special opcodes we do the default operation.
+          set_register(rd_reg, alu_out);
+      };
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          set_register(rd_reg, alu_out);
+          // HI and LO are UNPREDICTABLE after the operation.
+          set_register(LO, Unpredictable);
+          set_register(HI, Unpredictable);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    // Unimplemented opcodes raised an error in the configuration step before,
+    // so we can use the default here to set the destination register in common
+    // cases.
+    default:
+      set_register(rd_reg, alu_out);
+  };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::DecodeTypeImmediate(Instruction* instr) {
+  // Instruction fields
+  Opcode   op     = instr->OpcodeFieldRaw();
+  int32_t  rs     = get_register(instr->RsField());
+  uint32_t rs_u   = static_cast<uint32_t>(rs);
+  int32_t  rt_reg = instr->RtField();  // destination register
+  int32_t  rt     = get_register(rt_reg);
+  int16_t  imm16  = instr->Imm16Field();
+
+  int32_t  ft_reg = instr->FtField();  // destination register
+  int32_t  ft     = get_register(ft_reg);
+
+  // Zero-extended immediate.
+  uint32_t  oe_imm16 = 0xffff & imm16;
+  // Sign-extended immediate.
+  int32_t   se_imm16 = imm16;
+
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc.
+  int32_t next_pc = bad_ra;
+
+  // Used for conditional branch instructions
+  bool do_branch = false;
+  bool execute_branch_delay_instruction = false;
+
+  // Used for arithmetic instructions
+  int32_t alu_out = 0;
+  // Floating point
+  double fp_out = 0.0;
+
+  // Used for memory instructions
+  int32_t addr = 0x0;
+
+  // ---------- Configuration (and execution for REGIMM)
+  switch (op) {
+    // ------------- COP1. Coprocessor instructions
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // branch on coprocessor condition
+          UNIMPLEMENTED_MIPS();
+          break;
+        default:
+          UNREACHABLE();
+      };
+      break;
+    // ------------- REGIMM class
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          do_branch = (rs < 0);
+          break;
+        case BLTZAL:
+          do_branch = (rs < 0);
+          break;
+        case BGEZ:
+          do_branch = (rs >= 0);
+          break;
+        case BGEZAL:
+          do_branch = (rs >= 0);
+          break;
+        default:
+          UNREACHABLE();
+      };
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+        case BLTZAL:
+        case BGEZ:
+        case BGEZAL:
+          // Branch instructions common part.
+          execute_branch_delay_instruction = true;
+          // Set next_pc
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+            if (instr->IsLinkingInstruction()) {
+              set_register(31, current_pc + kBranchReturnOffset);
+            }
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+        default:
+          break;
+      };
+      break;  // case REGIMM
+    // ------------- Branch instructions
+    // When comparing to zero, the encoding of rt field is always 0, so we don't
+    // need to replace rt with zero.
+    case BEQ:
+      do_branch = (rs == rt);
+      break;
+    case BNE:
+      do_branch = (rs != rt);
+      break;
+    case BLEZ:
+      do_branch = (rs <= 0);
+      break;
+    case BGTZ:
+      do_branch = (rs > 0);
+      break;
+    // ------------- Arithmetic instructions
+    case ADDI:
+      if (HaveSameSign(rs, se_imm16)) {
+        if (rs > 0) {
+          exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+        } else if (rs < 0) {
+          exceptions[kIntegerUnderflow] =
+              rs < (Registers::kMinValue - se_imm16);
+        }
+      }
+      alu_out = rs + se_imm16;
+      break;
+    case ADDIU:
+      alu_out = rs + se_imm16;
+      break;
+    case SLTI:
+      alu_out = (rs < se_imm16) ? 1 : 0;
+      break;
+    case SLTIU:
+      alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+      break;
+    case ANDI:
+      alu_out = rs & oe_imm16;
+      break;
+    case ORI:
+      alu_out = rs | oe_imm16;
+      break;
+    case XORI:
+      alu_out = rs ^ oe_imm16;
+      break;
+    case LUI:
+      alu_out = (oe_imm16 << 16);
+      break;
+    // ------------- Memory instructions
+    case LB:
+      addr = rs + se_imm16;
+      alu_out = ReadB(addr);
+      break;
+    case LW:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LBU:
+      addr = rs + se_imm16;
+      alu_out = ReadBU(addr);
+      break;
+    case SB:
+      addr = rs + se_imm16;
+      break;
+    case SW:
+      addr = rs + se_imm16;
+      break;
+    case LWC1:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LDC1:
+      addr = rs + se_imm16;
+      fp_out = ReadD(addr, instr);
+      break;
+    case SWC1:
+    case SDC1:
+      addr = rs + se_imm16;
+      break;
+    default:
+      UNREACHABLE();
+  };
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution
+  switch (op) {
+    // ------------- Branch instructions
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+      // Branch instructions common part.
+      execute_branch_delay_instruction = true;
+      // Set next_pc
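+      // Worked example (illustrative): for a taken branch with imm16 == 3 the
+      // target is current_pc + (3 << 2) + 4 = current_pc + 16, while a
+      // not-taken branch falls through to current_pc + 8, just past the delay
+      // slot. The delay slot instruction is executed in both cases.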
+      if (do_branch) {
+        next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+        if (instr->IsLinkingInstruction()) {
+          set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+        }
+      } else {
+        next_pc = current_pc + 2 * Instruction::kInstructionSize;
+      }
+      break;
+    // ------------- Arithmetic instructions
+    case ADDI:
+    case ADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+      set_register(rt_reg, alu_out);
+      break;
+    // ------------- Memory instructions
+    case LB:
+    case LW:
+    case LBU:
+      set_register(rt_reg, alu_out);
+      break;
+    case SB:
+      WriteB(addr, static_cast<int8_t>(rt));
+      break;
+    case SW:
+      WriteW(addr, rt, instr);
+      break;
+    case LWC1:
+      set_fpu_register(ft_reg, alu_out);
+      break;
+    case LDC1:
+      set_fpu_register_double(ft_reg, fp_out);
+      break;
+    case SWC1:
+      addr = rs + se_imm16;
+      WriteW(addr, get_fpu_register(ft_reg), instr);
+      break;
+    case SDC1:
+      addr = rs + se_imm16;
+      // Store the 64-bit double held in FPU register ft.
+      WriteD(addr, get_fpu_register_double(ft_reg), instr);
+      break;
+    default:
+      break;
+  };
+
+
+  if (execute_branch_delay_instruction) {
+    // Execute the branch delay slot.
+    // We don't check for end_sim_pc: first, it should not be hit, since the
+    // current pc is valid; second, a jump should always execute its branch
+    // delay slot.
+    Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+        current_pc + Instruction::kInstructionSize);
+    BranchDelayInstructionDecode(branch_delay_instr);
+  }
+
+  // If needed update pc after the branch delay execution.
+  if (next_pc != bad_ra) {
+    set_pc(next_pc);
+  }
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::DecodeTypeJump(Instruction* instr) {
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Get unchanged bits of pc.
+  int32_t pc_high_bits = current_pc & 0xf0000000;
+  // Next pc
+  int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
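+  // Worked example (illustrative): with current_pc == 0x40001234 and
+  // Imm26Field() == 0x12345, pc_high_bits == 0x40000000 and
+  // next_pc == 0x40000000 | (0x12345 << 2) == 0x40048d14.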
+
+  // Execute the branch delay slot.
+  // We don't check for end_sim_pc: first, it should not be hit, since the
+  // current pc is valid; second, a jump should always execute its branch delay
+  // slot.
+  Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+      current_pc + Instruction::kInstructionSize);
+  BranchDelayInstructionDecode(branch_delay_instr);
+
+  // Update pc and ra if necessary.
+  // Do this after the branch delay execution.
+  if (instr->IsLinkingInstruction()) {
+    set_register(31, current_pc + 2 * Instruction::kInstructionSize);
+  }
+  set_pc(next_pc);
+  pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+  pc_modified_ = false;
+  if (::v8::internal::FLAG_trace_sim) {
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // use a reasonably large buffer
+    v8::internal::EmbeddedVector<char, 256> buffer;
+    dasm.InstructionDecode(buffer,
+                           reinterpret_cast<byte_*>(instr));
+    PrintF("  0x%08x  %s\n", instr, buffer.start());
+  }
+
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType:
+      DecodeTypeRegister(instr);
+      break;
+    case Instruction::kImmediateType:
+      DecodeTypeImmediate(instr);
+      break;
+    case Instruction::kJumpType:
+      DecodeTypeJump(instr);
+      break;
+    default:
+      UNSUPPORTED();
+  }
+  if (!pc_modified_) {
+    set_register(pc, reinterpret_cast<int32_t>(instr) +
+                 Instruction::kInstructionSize);
+  }
+}
+
+
+
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int program_counter = get_pc();
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      InstructionDecode(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is at a non-default value. Stop in the debugger when
+    // we reach the particular instruction count.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+        Debugger dbg(this);
+        dbg.Debug();
+      } else {
+        InstructionDecode(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+
+int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up arguments.
+
+  // First four arguments passed in registers.
+  ASSERT(argument_count >= 4);
+  set_register(a0, va_arg(parameters, int32_t));
+  set_register(a1, va_arg(parameters, int32_t));
+  set_register(a2, va_arg(parameters, int32_t));
+  set_register(a3, va_arg(parameters, int32_t));
+
+  // Remaining arguments passed on stack.
+  int original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+                                    - kArgsSlotsSize);
+  if (OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -OS::ActivationFrameAlignment();
+  }
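+  // Illustrative example, assuming the o32 convention of four reserved
+  // argument slots (i.e. kArgsSlotsSize == 16 and kArgsSlotsNum == 4): with
+  // argument_count == 6 and original_stack == 0x1000, entry_stack is
+  // 0x1000 - 2 * 4 - 16 = 0xfe8 before alignment, and the fifth and sixth
+  // arguments land in stack slots 4 and 5 below.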
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = 4; i < argument_count; i++) {
+    stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  // Prepare to execute the code at entry
+  set_register(pc, reinterpret_cast<int32_t>(entry));
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value in ra, the simulation stops when returning to this call point.
+  set_register(ra, end_sim_pc);
+
+  // Remember the values of callee-saved registers so that we can check below
+  // that they are preserved across the call into generated code.
+  int32_t s0_val = get_register(s0);
+  int32_t s1_val = get_register(s1);
+  int32_t s2_val = get_register(s2);
+  int32_t s3_val = get_register(s3);
+  int32_t s4_val = get_register(s4);
+  int32_t s5_val = get_register(s5);
+  int32_t s6_val = get_register(s6);
+  int32_t s7_val = get_register(s7);
+  int32_t gp_val = get_register(gp);
+  int32_t sp_val = get_register(sp);
+  int32_t fp_val = get_register(fp);
+
+  // Set up the callee-saved registers with a known value, so that we can
+  // check that they are preserved properly across JS execution.
+  int32_t callee_saved_value = icount_;
+  set_register(s0, callee_saved_value);
+  set_register(s1, callee_saved_value);
+  set_register(s2, callee_saved_value);
+  set_register(s3, callee_saved_value);
+  set_register(s4, callee_saved_value);
+  set_register(s5, callee_saved_value);
+  set_register(s6, callee_saved_value);
+  set_register(s7, callee_saved_value);
+  set_register(gp, callee_saved_value);
+  set_register(fp, callee_saved_value);
+
+  // Start the simulation
+  Execute();
+
+  // Check that the callee-saved registers have been preserved.
+  CHECK_EQ(callee_saved_value, get_register(s0));
+  CHECK_EQ(callee_saved_value, get_register(s1));
+  CHECK_EQ(callee_saved_value, get_register(s2));
+  CHECK_EQ(callee_saved_value, get_register(s3));
+  CHECK_EQ(callee_saved_value, get_register(s4));
+  CHECK_EQ(callee_saved_value, get_register(s5));
+  CHECK_EQ(callee_saved_value, get_register(s6));
+  CHECK_EQ(callee_saved_value, get_register(s7));
+  CHECK_EQ(callee_saved_value, get_register(gp));
+  CHECK_EQ(callee_saved_value, get_register(fp));
+
+  // Restore callee-saved registers with the original value.
+  set_register(s0, s0_val);
+  set_register(s1, s1_val);
+  set_register(s2, s2_val);
+  set_register(s3, s3_val);
+  set_register(s4, s4_val);
+  set_register(s5, s5_val);
+  set_register(s6, s6_val);
+  set_register(s7, s7_val);
+  set_register(gp, gp_val);
+  set_register(sp, sp_val);
+  set_register(fp, fp_val);
+
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  int32_t result = get_register(v0);
+  return result;
+}
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+  int new_sp = get_register(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  set_register(sp, new_sp);
+  return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+  int current_sp = get_register(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  set_register(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+
+
+#undef UNSUPPORTED
+
+} }  // namespace assembler::mips
+
+#endif  // !defined(__mips)
+
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
new file mode 100644
index 0000000..d5dfc30
--- /dev/null
+++ b/src/mips/simulator-mips.h
@@ -0,0 +1,311 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for MIPS instructions if we are not generating a native
+// MIPS binary. This Simulator allows us to run and debug MIPS code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forward to the real entry
+// point on a MIPS hardware platform.
+
+#ifndef V8_MIPS_SIMULATOR_MIPS_H_
+#define V8_MIPS_SIMULATOR_MIPS_H_
+
+#include "allocation.h"
+
+#if defined(__mips)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
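+// Illustrative usage (the parameter meanings depend on the call site):
+//   CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4);
+// expands here to a direct call, entry(p0, p1, p2, p3, p4), on MIPS hardware.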
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on mips uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch() { }
+};
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+  (reinterpret_cast<uintptr_t>(this) >= limit ? \
+      reinterpret_cast<uintptr_t>(this) - limit : 0)
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  reinterpret_cast<TryCatch*>(try_catch_address)
+
+
+#else   // #if defined(__mips)
+
+// When running with the simulator, we transition into simulated execution at
+// this point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  reinterpret_cast<Object*>(\
+      assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+                                                  p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  assembler::mips::Simulator::current()->Call(\
+    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  try_catch_address == NULL ? \
+      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+namespace assembler {
+namespace mips {
+
+class Simulator {
+ public:
+  friend class Debugger;
+
+  // Registers are declared in order. See SMRL chapter 2.
+  enum Register {
+    no_reg = -1,
+    zero_reg = 0,
+    at,
+    v0, v1,
+    a0, a1, a2, a3,
+    t0, t1, t2, t3, t4, t5, t6, t7,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    s8,
+    ra,
+    // LO, HI, and pc
+    LO,
+    HI,
+    pc,   // pc must be the last register.
+    kNumSimuRegisters,
+    // aliases
+    fp = s8
+  };
+
+  // Coprocessor registers.
+  // Generated code will always use doubles, so we only use even registers.
+  enum FPURegister {
+    f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+    f12, f13, f14, f15,   // f12 and f14 are the argument FPU registers.
+    f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+    f26, f27, f28, f29, f30, f31,
+    kNumFPURegisters
+  };
+
+  Simulator();
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current();
+
+  // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+  void set_register(int reg, int32_t value);
+  int32_t get_register(int reg) const;
+  // Same for FPURegisters
+  void set_fpu_register(int fpureg, int32_t value);
+  void set_fpu_register_double(int fpureg, double value);
+  int32_t get_fpu_register(int fpureg) const;
+  double get_fpu_register_double(int fpureg) const;
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(int32_t value);
+  int32_t get_pc() const;
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit() const;
+
+  // Executes MIPS instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize();
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  int32_t Call(byte_* entry, int argument_count, ...);
+
+  // Push an address onto the JS stack.
+  uintptr_t PushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t PopAddress();
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly set up.
+    bad_ra = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the ra is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2,
+    // Unpredictable value.
+    Unpredictable = 0xbadbeaf
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instruction* instr, const char* format);
+
+  // Read and write memory.
+  inline uint32_t ReadBU(int32_t addr);
+  inline int32_t ReadB(int32_t addr);
+  inline void WriteB(int32_t addr, uint8_t value);
+  inline void WriteB(int32_t addr, int8_t value);
+
+  inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+  inline int16_t ReadH(int32_t addr, Instruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+
+  inline int ReadW(int32_t addr, Instruction* instr);
+  inline void WriteW(int32_t addr, int value, Instruction* instr);
+
+  inline double ReadD(int32_t addr, Instruction* instr);
+  inline void WriteD(int32_t addr, double value, Instruction* instr);
+
+  // Operations depending on endianness.
+  // Get Double Higher / Lower word.
+  inline int32_t GetDoubleHIW(double* addr);
+  inline int32_t GetDoubleLOW(double* addr);
+  // Set Double Higher / Lower word.
+  inline int32_t SetDoubleHIW(double* addr);
+  inline int32_t SetDoubleLOW(double* addr);
+
+
+  // Executing is handled based on the instruction type.
+  void DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  // Used for breakpoints and traps.
+  void SoftwareInterrupt(Instruction* instr);
+
+  // Executes one instruction.
+  void InstructionDecode(Instruction* instr);
+  // Execute one instruction placed in a branch delay slot.
+  void BranchDelayInstructionDecode(Instruction* instr) {
+    if (instr->IsForbiddenInBranchDelay()) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Eror:Unexpected %i opcode in a branch delay slot.",
+               instr->OpcodeField());
+    }
+    InstructionDecode(instr);
+  }
+
+  enum Exception {
+    none,
+    kIntegerOverflow,
+    kIntegerUnderflow,
+    kDivideByZero,
+    kNumExceptions
+  };
+  int16_t exceptions[kNumExceptions];
+
+  // Exceptions.
+  void SignalExceptions();
+
+  // Runtime call support.
+  static void* RedirectExternalReference(void* external_function,
+                                         bool fp_return);
+
+  // Used for real time calls that take two double values as arguments and
+  // return a double.
+  void SetFpResult(double result);
+
+  // Architecture state.
+  // Registers.
+  int32_t registers_[kNumSimuRegisters];
+  // Coprocessor Registers.
+  int32_t FPUregisters_[kNumFPURegisters];
+
+  // Simulator support.
+  char* stack_;
+  bool pc_modified_;
+  int icount_;
+  static bool initialized_;
+
+  // Registered breakpoints.
+  Instruction* break_pc_;
+  Instr break_instr_;
+};
+
+} }   // namespace assembler::mips
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack does not cause stack overflow errors, since the simulator ignores the
+// input. This is unlikely to be an issue in practice, though it might cause
+// testing trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return assembler::mips::Simulator::current()->StackLimit();
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+    return sim->PushAddress(try_catch_address);
+  }
+
+  static inline void UnregisterCTryCatch() {
+    assembler::mips::Simulator::current()->PopAddress();
+  }
+};
+
+#endif  // defined(__mips)
+
+#endif  // V8_MIPS_SIMULATOR_MIPS_H_
+
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
new file mode 100644
index 0000000..a87a49b
--- /dev/null
+++ b/src/mips/stub-cache-mips.cc
@@ -0,0 +1,384 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register scratch,
+                                           Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object, the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
+                                             Register receiver,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x249);
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate StoreField code; the value is passed in the r0 register.
+// After executing the generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  return at;    // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss,
+                                        Failure** failure) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x470);
+  return false;   // UNIMPLEMENTED RETURN
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           LookupResult* lookup,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x505);
+}
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x782);
+  return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            JSFunction* function,
+                                            String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                AccessorInfo* callback,
+                                                String* name) {
+  UNIMPLEMENTED_MIPS();
+  __ break_(0x906);
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                   String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                              JSGlobalPropertyCell* cell,
+                                              String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
+                                              JSObject* holder,
+                                              AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                 JSObject* holder,
+                                                 String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
+                                            JSGlobalPropertyCell* cell,
+                                            String* name,
+                                            bool is_dont_delete) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+// TODO(1224671): implement the fast case.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* ConstructStubCompiler::CompileConstructStub(
+    SharedFunctionInfo* shared) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
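Throughout this file the __ shorthand expands via ACCESS_MASM; redefining it mid-file (just before CheckPrototypes above) switches code emission from the masm passed as a parameter to the compiler's own masm() accessor. Simplified, assuming the standard non-instrumented definition:

    #define ACCESS_MASM(masm) masm->

    #define __ ACCESS_MASM(masm)      // in static helpers taking a masm argument
    __ break_(0x249);                 // expands to masm->break_(0x249);

    #undef __
    #define __ ACCESS_MASM(masm())    // in member functions of StubCompiler
    __ break_(0x470);                 // expands to masm()->break_(0x470);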
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
new file mode 100644
index 0000000..fad7ec4
--- /dev/null
+++ b/src/mips/virtual-frame-mips.cc
@@ -0,0 +1,240 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ ACCESS_MASM(masm())
+
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters.  All initial frame elements are in
+// memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+  UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncRange(int begin, int end) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Enter() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Exit() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+  return kIllegalIndex;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallStub(CodeStub* stub) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                 InvokeJSFlags flags,
+                                 Result* arg_count_register,
+                                 int arg_count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                     RelocInfo::Mode rmode) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  int dropped_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg,
+                                  int dropped_args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallCodeObject(Handle<Code> code,
+                                  RelocInfo::Mode rmode,
+                                  Result* arg0,
+                                  Result* arg1,
+                                  int dropped_args,
+                                  bool set_auto_args_slots) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Drop(int count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::DropFromVFrameOnly(int count) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+Result VirtualFrame::Pop() {
+  UNIMPLEMENTED_MIPS();
+  Result res = Result();
+  return res;    // UNIMPLEMENTED RETURN
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitMultiPop(RegList regs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitMultiPush(RegList regs) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitArgumentSlots(RegList reglist) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
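To make the constructor above concrete, a worked example of its initializer list: for a function with two parameters and three stack-allocated locals, the entry frame already holds the receiver and both parameters, all in memory, so

    elements_ capacity hint: 2 + 3 + kPreallocatedElements
    initial layout:  [receiver, param0, param1]   // all in memory
    stack_pointer_:  parameter_count() == 2       // 0-based index of TOS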
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
new file mode 100644
index 0000000..79f973f
--- /dev/null
+++ b/src/mips/virtual-frame-mips.h
@@ -0,0 +1,548 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public ZoneObject {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    SpilledScope() {}
+  };
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  VirtualFrame();
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The number of elements on the virtual frame.
+  int element_count() { return elements_.length(); }
+
+  // The height of the virtual expression stack.
+  int height() {
+    return element_count() - expression_base_index();
+  }
+
+  int register_location(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num];
+  }
+
+  int register_location(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)];
+  }
+
+  void set_register_location(Register reg, int index) {
+    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+  }
+
+  bool is_used(int num) {
+    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+    return register_locations_[num] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return register_locations_[RegisterAllocator::ToNumber(reg)]
+        != kIllegalIndex;
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (e.g., the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget elements from the top of the frame to match an actual frame
+  // (e.g., the frame after a runtime call).  No code is emitted.
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == element_count() - 1);
+    stack_pointer_ -= count;
+    // On mips, all elements are in memory, so there is no extra bookkeeping
+    // (registers, copies, etc.) beyond dropping the elements.
+    elements_.Rewind(stack_pointer_ + 1);
+  }
+
+  // Forget count elements from the top of the frame and adjust the stack
+  // pointer downward.  This is used, for example, before merging frames at
+  // break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_location(reg));
+  }
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (i.e., they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Unuse(i);
+    }
+  }
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator() {
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
+    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+      if (is_used(i)) cgen_allocator->Use(i);
+    }
+  }
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals and
+  // dropping all non-locals elements in the virtual frame.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots();
+
+  // The current top of the expression stack as an assembly operand.
+  MemOperand Top() { return MemOperand(sp, 0); }
+
+  // An element of the expression stack as an assembly operand.
+  MemOperand ElementAt(int index) {
+    return MemOperand(sp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(element_count() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  MemOperand LocalAt(int index) {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count());
+    return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // The function frame slot.
+  MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // The context frame slot.
+  MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
+
+  // Save the value of the cp register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the cp register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  MemOperand ParameterAt(int index) {
+    // Index -1 corresponds to the receiver.
+    ASSERT(-1 <= index);
+    ASSERT(index <= parameter_count());
+    uint16_t a = 0;   // Number of argument slots.
+    return MemOperand(s8_fp,
+                      (1 + parameter_count() + a - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  MemOperand Receiver() { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  void CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    RawCallStub(stub);
+  }
+
+  // Call stub that expects its argument in r0.  The argument is given
+  // as a result which must be the register r0.
+  void CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that expects its arguments in r1 and r0.  The arguments
+  // are given as results which must be the appropriate registers.
+  void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  void CallRuntime(Runtime::Function* f, int arg_count);
+  void CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Call runtime with sp aligned to 8 bytes.
+  void CallAlignedRuntime(Runtime::Function* f, int arg_count);
+  void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeJSFlags flag,
+                     Result* arg_count_register,
+                     int arg_count);
+
+  // Call into an IC stub given the number of arguments it removes
+  // from the stack.  Register arguments are passed as results and
+  // consumed by the call.
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg,
+                      int dropped_args);
+  void CallCodeObject(Handle<Code> ic,
+                      RelocInfo::Mode rmode,
+                      Result* arg0,
+                      Result* arg1,
+                      int dropped_args,
+                      bool set_auto_args_slots = false);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+  // Similar to VirtualFrame::Drop, but does not modify the actual stack;
+  // used where sp must be manually restored to the correct position.
+  void DropFromVFrameOnly(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+  void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  // Same but for multiple registers.
+  void EmitMultiPop(RegList regs);  // Higher indexed registers popped first.
+  void EmitMultiPopReversed(RegList regs);  // Lower indexed first.
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  // Same but for multiple registers.
+  void EmitMultiPush(RegList regs);  // Lower indexed registers pushed first.
+  void EmitMultiPushReversed(RegList regs);  // Higher indexed first.
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the frame).
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+  // Pushes four argument slots on the stack and saves the requested 'a'
+  // registers ('a' registers are the argument registers a0 through a3).
+  void EmitArgumentSlots(RegList reglist);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  ZoneList<FrameElement> elements_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the sp register).
+  int stack_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[RegisterAllocator::kNumRegisters];
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the fp register).  The parameters, receiver, function, and context
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 3; }
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() { return 1; }
+
+  // The index of the context slot in the frame.  It is immediately
+  // below the frame pointer.
+  int context_index() { return frame_pointer() - 1; }
+
+  // The index of the function slot in the frame.  It is below the frame
+  // pointer and context slot.
+  int function_index() { return frame_pointer() - 2; }
+
+  // The index of the first local.  Between the frame pointer and the
+  // locals lies the return address.
+  int local0_index() { return frame_pointer() + 2; }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() { return local0_index() + local_count(); }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) {
+    ASSERT(index < element_count());
+    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    set_register_location(reg, index);
+    cgen()->allocator()->Use(reg);
+  }
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg));
+    set_register_location(reg, kIllegalIndex);
+    cgen()->allocator()->Unuse(reg);
+  }
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end] with memory.
+  void SyncRange(int begin, int end);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  void RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  // Classes that need raw access to the elements_ array.
+  friend class DeferredCode;
+  friend class JumpTarget;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
+
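The private index helpers above encode a fixed layout: receiver at index 0, then parameters, function, context, the frame-pointer element, the return address, and finally locals. A self-contained sketch (plain C++ outside of V8; names mirror the helpers) checking the arithmetic for two parameters and three locals:

    #include <cassert>

    int main() {
      const int parameter_count = 2;
      const int local_count = 3;

      // Receiver + parameters + function + context lie below the fp element.
      const int frame_pointer = parameter_count + 3;
      const int param0_index = 1;
      const int context_index = frame_pointer - 1;
      const int function_index = frame_pointer - 2;
      const int local0_index = frame_pointer + 2;  // return address in between
      const int expression_base_index = local0_index + local_count;

      assert(param0_index == 1);
      assert(frame_pointer == 5);
      assert(function_index == 3 && context_index == 4);
      assert(local0_index == 7);
      assert(expression_base_index == 10);  // height() is measured from here
      return 0;
    }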
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index 1487ce5..e1bedfd 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -1733,7 +1733,8 @@
 
 
 ScriptMirror.prototype.name = function() {
-  return this.script_.name;
+  // If we have a name, we trust it more than a sourceURL from comments.
+  return this.script_.name || this.sourceUrlFromComment_();
 };
 
 
@@ -1829,6 +1830,29 @@
 
 
 /**
+ * Returns a suggested script URL from comments in script code (if found),
+ * undefined otherwise. Used primarily by debuggers for identifying eval()'ed
+ * scripts. See
+ * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
+ * for details.
+ *
+ * @return {?string} value for //@ sourceURL comment
+ */
+ScriptMirror.prototype.sourceUrlFromComment_ = function() {
+  if (!('sourceUrl_' in this) && this.source()) {
+    // TODO(608): the spaces in a regexp below had to be escaped as \040
+    // because this file is being processed by js2c whose handling of spaces
+    // in regexps is broken.
+    // We're not using \s here to prevent \n from matching.
+    var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m;
+    var match = sourceUrlPattern.exec(this.source());
+    this.sourceUrl_ = match ? match[1] : undefined;
+  }
+  return this.sourceUrl_;
+};
+
+
+/**
  * Mirror object for context.
  * @param {Object} data The context data
  * @constructor
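For example, an eval'ed script whose source contains the line //@ sourceURL=dynamicScript.js will be reported to the debugger as dynamicScript.js whenever the script has no name of its own.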
diff --git a/src/number-info.h b/src/number-info.h
new file mode 100644
index 0000000..c6f32e4
--- /dev/null
+++ b/src/number-info.h
@@ -0,0 +1,72 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_NUMBER_INFO_H_
+#define V8_NUMBER_INFO_H_
+
+namespace v8 {
+namespace internal {
+
+class NumberInfo : public AllStatic {
+ public:
+  enum Type {
+    kUnknown = 0,
+    kNumber = 1,
+    kSmi = 3,
+    kHeapNumber = 5,
+    kUninitialized = 7
+  };
+
+  // Return the weakest (least precise) common type.
+  static Type Combine(Type a, Type b) {
+    // The enum values are bit patterns, so the bitwise AND of two types
+    // is their weakest common type.
+    return static_cast<Type>(a & b);
+  }
+
+  static bool IsNumber(Type a) {
+    ASSERT(a != kUninitialized);
+    return ((a & kNumber) != 0);
+  }
+
+  static const char* ToString(Type a) {
+    switch (a) {
+      case kUnknown: return "UnknownType";
+      case kNumber: return "NumberType";
+      case kSmi: return "SmiType";
+      case kHeapNumber: return "HeapNumberType";
+      case kUninitialized:
+        UNREACHABLE();
+        return "UninitializedType";
+    }
+    UNREACHABLE();
+    return "Unreachable code";
+  }
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_NUMBER_INFO_H_
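The enum values are bit patterns chosen so that bitwise AND computes the weakest common type: kSmi (011) and kHeapNumber (101) overlap only in the kNumber bit (001), and kUnknown (000) absorbs everything. A standalone sketch of the behaviour:

    #include <cassert>

    enum Type { kUnknown = 0, kNumber = 1, kSmi = 3, kHeapNumber = 5 };

    Type Combine(Type a, Type b) {
      return static_cast<Type>(a & b);  // weakest common type
    }

    int main() {
      assert(Combine(kSmi, kSmi) == kSmi);             // identical types survive
      assert(Combine(kSmi, kHeapNumber) == kNumber);   // both are still numbers
      assert(Combine(kNumber, kUnknown) == kUnknown);  // unknown absorbs
      return 0;
    }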
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 7e77e81..9415bc1 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1036,6 +1036,8 @@
 
 void FunctionTemplateInfo::FunctionTemplateInfoPrint() {
   HeapObject::PrintHeader("FunctionTemplateInfo");
+  PrintF("\n - class name: ");
+  class_name()->ShortPrint();
   PrintF("\n - tag: ");
   tag()->ShortPrint();
   PrintF("\n - property_list: ");
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 4355fe9..455a84c 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2773,6 +2773,13 @@
 }
 
 
+bool JSObject::AllowsSetElementsLength() {
+  bool result = elements()->IsFixedArray();
+  ASSERT(result == (!HasPixelElements() && !HasExternalArrayElements()));
+  return result;
+}
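The ASSERT documents the invariant this predicate relies on: an object's elements are backed by a plain FixedArray exactly when it has neither pixel nor external array elements, so the cheap IsFixedArray() test can stand in for the two negative checks.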
+
+
 StringDictionary* JSObject::property_dictionary() {
   ASSERT(!HasFastProperties());
   return StringDictionary::cast(properties());
diff --git a/src/objects.cc b/src/objects.cc
index 6dd1d49..edb7680 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -219,7 +219,7 @@
     LookupResult* result,
     String* name,
     PropertyAttributes* attributes) {
-  if (result->IsValid()) {
+  if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
         // Only allow API accessors.
@@ -242,7 +242,7 @@
         // Search ALL_CAN_READ accessors in prototype chain.
         LookupResult r;
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
-        if (r.IsValid()) {
+        if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
                                                   &r,
                                                   name,
@@ -255,16 +255,15 @@
         // No access check in GetPropertyAttributeWithInterceptor.
         LookupResult r;
         result->holder()->LookupRealNamedProperty(name, &r);
-        if (r.IsValid()) {
+        if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
                                                   &r,
                                                   name,
                                                   attributes);
         }
+        break;
       }
-      default: {
-        break;
-      }
+      default:
+        UNREACHABLE();
     }
   }
 
@@ -280,7 +279,7 @@
     LookupResult* result,
     String* name,
     bool continue_search) {
-  if (result->IsValid()) {
+  if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
         // Only allow API accessors.
@@ -301,7 +300,7 @@
         // Search ALL_CAN_READ accessors in prototype chain.
         LookupResult r;
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
-        if (r.IsValid()) {
+        if (r.IsProperty()) {
           return GetPropertyAttributeWithFailedAccessCheck(receiver,
                                                            &r,
                                                            name,
@@ -319,7 +318,7 @@
         } else {
           result->holder()->LocalLookupRealNamedProperty(name, &r);
         }
-        if (r.IsValid()) {
+        if (r.IsProperty()) {
           return GetPropertyAttributeWithFailedAccessCheck(receiver,
                                                            &r,
                                                            name,
@@ -328,9 +327,8 @@
         break;
       }
 
-      default: {
-        break;
-      }
+      default:
+        UNREACHABLE();
     }
   }
 
@@ -505,7 +503,7 @@
   // holder will always be the interceptor holder and the search may
   // only continue with a current object just after the interceptor
   // holder in the prototype chain.
-  Object* last = result->IsValid() ? result->holder() : Heap::null_value();
+  Object* last = result->IsProperty() ? result->holder() : Heap::null_value();
   for (Object* current = this; true; current = current->GetPrototype()) {
     if (current->IsAccessCheckNeeded()) {
       // Check if we're allowed to read from the current object. Note
@@ -1463,8 +1461,12 @@
   // Check local property, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (result.IsValid()) return SetProperty(&result, name, value, attributes);
-  // Add real property.
+  if (result.IsFound()) {
+    // An existing property, a map transition or a null descriptor was
+    // found.  Use set property to handle all these cases.
+    return SetProperty(&result, name, value, attributes);
+  }
+  // Add a new real property.
   return AddProperty(name, value, attributes);
 }
 
@@ -1696,8 +1698,8 @@
        pt != Heap::null_value();
        pt = pt->GetPrototype()) {
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
-    if (result->IsValid()) {
-      if (!result->IsTransitionType() && result->IsReadOnly()) {
+    if (result->IsProperty()) {
+      if (result->IsReadOnly()) {
         result->NotFound();
         return;
       }
@@ -1758,7 +1760,11 @@
 
   if (HasFastProperties()) {
     LookupInDescriptor(name, result);
-    if (result->IsValid()) {
+    if (result->IsFound()) {
+      // A property, a map transition or a null descriptor was found.
+      // We return all of these result types because
+      // LocalLookupRealNamedProperty is used when setting properties
+      // where map transitions and null descriptors are handled.
       ASSERT(result->holder() == this && result->type() != NORMAL);
       // Disallow caching for uninitialized constants. These can only
       // occur as fields.
@@ -1808,16 +1814,7 @@
        pt != Heap::null_value();
        pt = JSObject::cast(pt)->GetPrototype()) {
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
-    if (result->IsValid()) {
-      switch (result->type()) {
-        case NORMAL:
-        case FIELD:
-        case CONSTANT_FUNCTION:
-        case CALLBACKS:
-          return;
-        default: break;
-      }
-    }
+    if (result->IsProperty() && (result->type() != INTERCEPTOR)) return;
   }
   result->NotFound();
 }
@@ -1903,14 +1900,15 @@
     // accessor that wants to handle the property.
     LookupResult accessor_result;
     LookupCallbackSetterInPrototypes(name, &accessor_result);
-    if (accessor_result.IsValid()) {
+    if (accessor_result.IsProperty()) {
       return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
                                      name,
                                      value,
                                      accessor_result.holder());
     }
   }
-  if (result->IsNotFound()) {
+  if (!result->IsFound()) {
+    // Neither properties nor transitions found.
     return AddProperty(name, value, attributes);
   }
   if (!result->IsLoaded()) {
@@ -1972,15 +1970,12 @@
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
   AssertNoContextChange ncc;
-  // ADDED TO CLONE
-  LookupResult result_struct;
-  LocalLookup(name, &result_struct);
-  LookupResult* result = &result_struct;
-  // END ADDED TO CLONE
+  LookupResult result;
+  LocalLookup(name, &result);
   // Check access rights if needed.
   if (IsAccessCheckNeeded()
-    && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(result, name, value);
+      && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    return SetPropertyWithFailedAccessCheck(&result, name, value);
   }
 
   if (IsJSGlobalProxy()) {
@@ -1994,31 +1989,34 @@
   }
 
   // Check for accessor in prototype chain removed here in clone.
-  if (result->IsNotFound()) {
+  if (!result.IsFound()) {
+    // Neither properties nor transitions found.
     return AddProperty(name, value, attributes);
   }
-  if (!result->IsLoaded()) {
-    return SetLazyProperty(result, name, value, attributes);
+  if (!result.IsLoaded()) {
+    return SetLazyProperty(&result, name, value, attributes);
   }
+  PropertyDetails details = PropertyDetails(attributes, NORMAL);
+
   // Check of IsReadOnly removed from here in clone.
-  switch (result->type()) {
+  switch (result.type()) {
     case NORMAL:
-      return SetNormalizedProperty(result, value);
+      return SetNormalizedProperty(name, value, details);
     case FIELD:
-      return FastPropertyAtPut(result->GetFieldIndex(), value);
+      return FastPropertyAtPut(result.GetFieldIndex(), value);
     case MAP_TRANSITION:
-      if (attributes == result->GetAttributes()) {
+      if (attributes == result.GetAttributes()) {
         // Only use map transition if the attributes match.
-        return AddFastPropertyUsingMap(result->GetTransitionMap(),
+        return AddFastPropertyUsingMap(result.GetTransitionMap(),
                                        name,
                                        value);
       }
       return ConvertDescriptorToField(name, value, attributes);
     case CONSTANT_FUNCTION:
       // Only replace the function if necessary.
-      if (value == result->GetConstantFunction()) return value;
+      if (value == result.GetConstantFunction()) return value;
       // Preserve the attributes of this existing property.
-      attributes = result->GetAttributes();
+      attributes = result.GetAttributes();
       return ConvertDescriptorToField(name, value, attributes);
     case CALLBACKS:
     case INTERCEPTOR:
@@ -2134,7 +2132,7 @@
                                                      name,
                                                      continue_search);
   }
-  if (result->IsValid()) {
+  if (result->IsProperty()) {
     switch (result->type()) {
       case NORMAL:  // fall through
       case FIELD:
@@ -2144,13 +2142,8 @@
       case INTERCEPTOR:
         return result->holder()->
           GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-      case NULL_DESCRIPTOR:
-        return ABSENT;
       default:
         UNREACHABLE();
-        break;
     }
   }
   return ABSENT;
@@ -2323,7 +2316,7 @@
   // Check local property, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (!result.IsValid()) return Heap::true_value();
+  if (!result.IsProperty()) return Heap::true_value();
 
   // Normalize object if needed.
   Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
@@ -2507,7 +2500,7 @@
   } else {
     LookupResult result;
     LocalLookup(name, &result);
-    if (!result.IsValid()) return Heap::true_value();
+    if (!result.IsProperty()) return Heap::true_value();
     // Ignore attributes if forcing a deletion.
     if (result.IsDontDelete() && mode != FORCE_DELETION) {
       return Heap::false_value();
@@ -2742,7 +2735,7 @@
        current != Heap::null_value();
        current = JSObject::cast(current)->GetPrototype()) {
     JSObject::cast(current)->LocalLookup(name, result);
-    if (result->IsValid() && !result->IsTransitionType()) return;
+    if (result->IsProperty()) return;
   }
   result->NotFound();
 }
@@ -2754,7 +2747,7 @@
        current != Heap::null_value();
        current = JSObject::cast(current)->GetPrototype()) {
     JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
-    if (result->IsValid() && result->type() == CALLBACKS) return;
+    if (result->IsProperty() && result->type() == CALLBACKS) return;
   }
   result->NotFound();
 }
@@ -2784,7 +2777,7 @@
   // cause security problems.
   LookupResult callback_result;
   LookupCallback(name, &callback_result);
-  if (callback_result.IsValid()) {
+  if (callback_result.IsFound()) {
     Object* obj = callback_result.GetCallbackObject();
     if (obj->IsAccessorInfo() &&
         AccessorInfo::cast(obj)->prohibits_overwriting()) {
@@ -2835,11 +2828,16 @@
     // Lookup the name.
     LookupResult result;
     LocalLookup(name, &result);
-    if (result.IsValid()) {
+    if (result.IsProperty()) {
       if (result.IsReadOnly()) return Heap::undefined_value();
       if (result.type() == CALLBACKS) {
         Object* obj = result.GetCallbackObject();
         if (obj->IsFixedArray()) {
+          // The object might be in fast mode even though it has
+          // a getter/setter.
+          Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+          if (ok->IsFailure()) return ok;
+
           PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
           SetNormalizedProperty(name, obj, details);
           return obj;
@@ -2952,7 +2950,7 @@
          obj = JSObject::cast(obj)->GetPrototype()) {
       LookupResult result;
       JSObject::cast(obj)->LocalLookup(name, &result);
-      if (result.IsValid()) {
+      if (result.IsProperty()) {
         if (result.IsReadOnly()) return Heap::undefined_value();
         if (result.type() == CALLBACKS) {
           Object* obj = result.GetCallbackObject();
@@ -4820,6 +4818,40 @@
 }
 
 
+bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
+  // Check the basic conditions for generating inline constructor code.
+  if (!FLAG_inline_new
+      || !has_only_simple_this_property_assignments()
+      || this_property_assignments_count() == 0) {
+    return false;
+  }
+
+  // If the prototype is null inline constructors cause no problems.
+  if (!prototype->IsJSObject()) {
+    ASSERT(prototype->IsNull());
+    return true;
+  }
+
+  // Traverse the proposed prototype chain looking for setters for any of
+  // the properties assigned by the inline constructor.
+  for (Object* obj = prototype;
+       obj != Heap::null_value();
+       obj = obj->GetPrototype()) {
+    JSObject* js_object = JSObject::cast(obj);
+    for (int i = 0; i < this_property_assignments_count(); i++) {
+      LookupResult result;
+      String* name = GetThisPropertyAssignmentName(i);
+      js_object->LocalLookupRealNamedProperty(name, &result);
+      if (result.IsProperty() && result.type() == CALLBACKS) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+
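The CALLBACKS check above is what keeps the fast path safe: an inline constructor stores this-property assignments directly into the new object, so if any object on the proposed prototype chain defines an accessor for one of those names, the store would silently bypass the setter; the fast path is therefore disabled.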
 void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
     bool only_simple_this_property_assignments,
     FixedArray* assignments) {
@@ -4875,7 +4907,6 @@
 }
 
 
-
 // Support function for printing the source code to a StringStream
 // without any allocation in the heap.
 void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
@@ -5273,7 +5304,7 @@
 
 Object* JSObject::SetElementsLength(Object* len) {
   // We should never end in here with a pixel or external array.
-  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+  ASSERT(AllowsSetElementsLength());
 
   Object* smi_length = len->ToSmi();
   if (smi_length->IsSmi()) {
@@ -5353,6 +5384,48 @@
 }
 
 
+Object* JSObject::SetPrototype(Object* value,
+                               bool skip_hidden_prototypes) {
+  // Silently ignore the change if value is not a JSObject or null.
+  // SpiderMonkey behaves this way.
+  if (!value->IsJSObject() && !value->IsNull()) return value;
+
+  // Before we can set the prototype we need to be sure
+  // prototype cycles are prevented.
+  // It is sufficient to validate that the receiver is not in the new prototype
+  // chain.
+  for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
+    if (JSObject::cast(pt) == this) {
+      // Cycle detected.
+      HandleScope scope;
+      return Top::Throw(*Factory::NewError("cyclic_proto",
+                                           HandleVector<Object>(NULL, 0)));
+    }
+  }
+
+  JSObject* real_receiver = this;
+
+  if (skip_hidden_prototypes) {
+    // Find the first object in the chain whose prototype object is not
+    // hidden and set the new prototype on that object.
+    Object* current_proto = real_receiver->GetPrototype();
+    while (current_proto->IsJSObject() &&
+          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+      real_receiver = JSObject::cast(current_proto);
+      current_proto = current_proto->GetPrototype();
+    }
+  }
+
+  // Set the new prototype of the object.
+  Object* new_map = real_receiver->map()->CopyDropTransitions();
+  if (new_map->IsFailure()) return new_map;
+  Map::cast(new_map)->set_prototype(value);
+  real_receiver->set_map(Map::cast(new_map));
+
+  return value;
+}
+
+
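The cycle check is observable from script through __proto__ assignment; a
minimal sketch (the exact error message wording is illustrative):

  var a = {};
  var b = {};
  a.__proto__ = b;  // fine: b is not already in a's prototype chain
  b.__proto__ = a;  // throws TypeError: cyclic __proto__ value
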
 bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
@@ -6170,7 +6243,9 @@
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (result.IsValid()) return GetProperty(receiver, &result, name, attributes);
+  if (result.IsProperty()) {
+    return GetProperty(receiver, &result, name, attributes);
+  }
   // Continue searching via the prototype chain.
   Object* pt = GetPrototype();
   *attributes = ABSENT;
@@ -6186,8 +6261,10 @@
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (!result.IsValid()) return Heap::undefined_value();
-  return GetProperty(receiver, &result, name, attributes);
+  if (result.IsProperty()) {
+    return GetProperty(receiver, &result, name, attributes);
+  }
+  return Heap::undefined_value();
 }
 
 
@@ -6239,24 +6316,7 @@
 
   LookupResult result;
   LocalLookupRealNamedProperty(key, &result);
-  if (result.IsValid()) {
-    switch (result.type()) {
-      case NORMAL:    // fall through.
-      case FIELD:     // fall through.
-      case CALLBACKS:  // fall through.
-      case CONSTANT_FUNCTION:
-        return true;
-      case INTERCEPTOR:
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-      case NULL_DESCRIPTOR:
-        return false;
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  return false;
+  return result.IsProperty() && (result.type() != INTERCEPTOR);
 }
 
 
@@ -6318,7 +6378,7 @@
 
   LookupResult result;
   LocalLookupRealNamedProperty(key, &result);
-  return result.IsValid() && (result.type() == CALLBACKS);
+  return result.IsProperty() && (result.type() == CALLBACKS);
 }
 
 
diff --git a/src/objects.h b/src/objects.h
index f641196..3ed0a70 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -34,6 +34,8 @@
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
 #endif
 
 //
@@ -1101,7 +1103,6 @@
 # define BIG_ENDIAN_FLOATING_POINT 1
 #endif
   static const int kSize = kValueOffset + kDoubleSize;
-
   static const uint32_t kSignMask = 0x80000000u;
   static const uint32_t kExponentMask = 0x7ff00000u;
   static const uint32_t kMantissaMask = 0xfffffu;
@@ -1160,6 +1161,7 @@
   inline bool HasExternalIntElements();
   inline bool HasExternalUnsignedIntElements();
   inline bool HasExternalFloatElements();
+  inline bool AllowsSetElementsLength();
   inline NumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Collects elements starting at index 0.
@@ -1316,6 +1318,9 @@
   // Return the object's prototype (might be Heap::null_value()).
   inline Object* GetPrototype();
 
+  // Set the object's prototype (only JSObject and null are allowed).
+  Object* SetPrototype(Object* value, bool skip_hidden_prototypes);
+
   // Tells whether the index'th element is present.
   inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
@@ -3230,6 +3235,10 @@
   inline bool try_full_codegen();
   inline void set_try_full_codegen(bool flag);
 
+  // Check whether an inline constructor can be generated with the given
+  // prototype.
+  bool CanGenerateInlineConstructor(Object* prototype);
+
   // For functions which only contains this property assignments this provides
   // access to the names for the properties assigned.
   DECL_ACCESSORS(this_property_assignments, Object)
diff --git a/src/parser.cc b/src/parser.cc
index b06d86f..5058296 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1690,7 +1690,8 @@
   // Propagate the collected information on this property assignments.
   if (top_scope_->is_function_scope()) {
     bool only_simple_this_property_assignments =
-        this_property_assignment_finder.only_simple_this_property_assignments();
+        this_property_assignment_finder.only_simple_this_property_assignments()
+        && top_scope_->declarations()->length() == 0;
     if (only_simple_this_property_assignments) {
       temp_scope_->SetThisPropertyAssignmentInfo(
           only_simple_this_property_assignments,
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 005b1de..e890f94 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -89,6 +89,8 @@
   // Here gcc is telling us that we are on an ARM and gcc is assuming that we
   // have VFP3 instructions.  If gcc can assume it then so can we.
   return 1u << VFP3;
+#elif CAN_USE_ARMV7_INSTRUCTIONS
+  return 1u << ARMv7;
 #else
   return 0;  // Linux runs on anything.
 #endif
@@ -113,6 +115,9 @@
     case VFP3:
       search_string = "vfp";
       break;
+    case ARMv7:
+      search_string = "ARMv7";
+      break;
     default:
       UNREACHABLE();
   }
@@ -151,11 +156,12 @@
   // On EABI ARM targets this is required for fp correctness in the
   // runtime system.
   return 8;
-#else
+#elif V8_TARGET_ARCH_MIPS
+  return 8;
+#endif
   // With gcc 4.4 the tree vectorization optimiser can generate code
   // that requires 16 byte alignment such as movdqa on x86.
   return 16;
-#endif
 }
 
 
@@ -262,6 +268,8 @@
 //  which is the architecture of generated code).
 #if defined(__arm__) || defined(__thumb__)
   asm("bkpt 0");
+#elif defined(__mips__)
+  asm("break");
 #else
   asm("int $3");
 #endif
@@ -713,6 +721,7 @@
 
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+#ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
   if (active_sampler_ == NULL) return;
@@ -743,6 +752,9 @@
     sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
     sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
+#elif V8_HOST_ARCH_MIPS
+    // Implement this on MIPS.
+    UNIMPLEMENTED();
 #endif
     if (IsVmThread())
       active_sampler_->SampleStack(&sample);
@@ -752,6 +764,7 @@
   sample.state = Logger::state();
 
   active_sampler_->Tick(&sample);
+#endif
 }
 
 
diff --git a/src/property.cc b/src/property.cc
index caa7397..b579b68 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -33,7 +33,7 @@
 
 #ifdef DEBUG
 void LookupResult::Print() {
-  if (!IsValid()) {
+  if (!IsFound()) {
     PrintF("Not Found\n");
     return;
   }
diff --git a/src/property.h b/src/property.h
index 1869719..dc51348 100644
--- a/src/property.h
+++ b/src/property.h
@@ -201,23 +201,17 @@
   }
 
   JSObject* holder() {
-    ASSERT(IsValid());
+    ASSERT(IsFound());
     return holder_;
   }
 
   PropertyType type() {
-    ASSERT(IsValid());
+    ASSERT(IsFound());
     return details_.type();
   }
 
-  bool IsTransitionType() {
-    PropertyType t = type();
-    if (t == MAP_TRANSITION || t == CONSTANT_TRANSITION) return true;
-    return false;
-  }
-
   PropertyAttributes GetAttributes() {
-    ASSERT(IsValid());
+    ASSERT(IsFound());
     return details_.attributes();
   }
 
@@ -229,14 +223,17 @@
   bool IsDontDelete() { return details_.IsDontDelete(); }
   bool IsDontEnum() { return details_.IsDontEnum(); }
   bool IsDeleted() { return details_.IsDeleted(); }
+  bool IsFound() { return lookup_type_ != NOT_FOUND; }
 
-  bool IsValid() { return  lookup_type_ != NOT_FOUND; }
-  bool IsNotFound() { return lookup_type_ == NOT_FOUND; }
-
-  // Tells whether the result is a property.
-  // Excluding transitions and the null descriptor.
+  // Is the result a property, excluding transitions and the null
+  // descriptor?
   bool IsProperty() {
-    return IsValid() && type() < FIRST_PHANTOM_PROPERTY_TYPE;
+    return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+  }
+
+  // Is the result a property or a transition?
+  bool IsPropertyOrTransition() {
+    return IsFound() && (type() != NULL_DESCRIPTOR);
   }
 
   bool IsCacheable() { return cacheable_; }
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
index 8fb498b..a99f455 100644
--- a/src/register-allocator-inl.h
+++ b/src/register-allocator-inl.h
@@ -38,6 +38,8 @@
 #include "x64/register-allocator-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/register-allocator-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -46,6 +48,20 @@
 namespace v8 {
 namespace internal {
 
+Result::Result(const Result& other) {
+  other.CopyTo(this);
+}
+
+
+Result& Result::operator=(const Result& other) {
+  if (this != &other) {
+    Unuse();
+    other.CopyTo(this);
+  }
+  return *this;
+}
+
+
 Result::~Result() {
   if (is_register()) {
     CodeGeneratorScope::Current()->allocator()->Unuse(reg());
@@ -69,6 +85,25 @@
 }
 
 
+bool RegisterAllocator::is_used(Register reg) {
+  return registers_.is_used(ToNumber(reg));
+}
+
+
+int RegisterAllocator::count(Register reg) {
+  return registers_.count(ToNumber(reg));
+}
+
+
+void RegisterAllocator::Use(Register reg) {
+  registers_.Use(ToNumber(reg));
+}
+
+
+void RegisterAllocator::Unuse(Register reg) {
+  registers_.Unuse(ToNumber(reg));
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index d55f949..349cc24 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -37,10 +37,12 @@
 // Result implementation.
 
 
-Result::Result(Register reg) {
+Result::Result(Register reg, NumberInfo::Type info) {
   ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
   CodeGeneratorScope::Current()->allocator()->Use(reg);
-  value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
+  value_ = TypeField::encode(REGISTER)
+      | NumberInfoField::encode(info)
+      | DataField::encode(reg.code_);
 }
 
 
@@ -50,6 +52,23 @@
 }
 
 
+NumberInfo::Type Result::number_info() {
+  ASSERT(is_valid());
+  if (!is_constant()) return NumberInfoField::decode(value_);
+  Handle<Object> value = handle();
+  if (value->IsSmi()) return NumberInfo::kSmi;
+  if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
+  return NumberInfo::kUnknown;
+}
+
+
+void Result::set_number_info(NumberInfo::Type info) {
+  ASSERT(is_valid());
+  value_ = value_ & ~NumberInfoField::mask();
+  value_ = value_ | NumberInfoField::encode(info);
+}
+
+
 // -------------------------------------------------------------------------
 // RegisterAllocator implementation.
 
diff --git a/src/register-allocator.h b/src/register-allocator.h
index 1765633..4ec0bb4 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -29,6 +29,7 @@
 #define V8_REGISTER_ALLOCATOR_H_
 
 #include "macro-assembler.h"
+#include "number-info.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "ia32/register-allocator-ia32.h"
@@ -36,6 +37,8 @@
 #include "x64/register-allocator-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/register-allocator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/register-allocator-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -62,28 +65,21 @@
   Result() { invalidate(); }
 
   // Construct a register Result.
-  explicit Result(Register reg);
+  explicit Result(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
 
   // Construct a Result whose value is a compile-time constant.
   explicit Result(Handle<Object> value) {
     value_ = TypeField::encode(CONSTANT)
+        | NumberInfoField::encode(NumberInfo::kUninitialized)
         | DataField::encode(ConstantList()->length());
     ConstantList()->Add(value);
   }
 
   // The copy constructor and assignment operators could each create a new
   // register reference.
-  Result(const Result& other) {
-    other.CopyTo(this);
-  }
+  inline Result(const Result& other);
 
-  Result& operator=(const Result& other) {
-    if (this != &other) {
-      Unuse();
-      other.CopyTo(this);
-    }
-    return *this;
-  }
+  inline Result& operator=(const Result& other);
 
   inline ~Result();
 
@@ -105,6 +101,14 @@
 
   void invalidate() { value_ = TypeField::encode(INVALID); }
 
+  NumberInfo::Type number_info();
+  void set_number_info(NumberInfo::Type info);
+  bool is_number() {
+    return (number_info() & NumberInfo::kNumber) != 0;
+  }
+  bool is_smi() { return number_info() == NumberInfo::kSmi; }
+  bool is_heap_number() { return number_info() == NumberInfo::kHeapNumber; }
+
   bool is_valid() const { return type() != INVALID; }
   bool is_register() const { return type() == REGISTER; }
   bool is_constant() const { return type() == CONSTANT; }
@@ -136,7 +140,8 @@
   uint32_t value_;
 
   class TypeField: public BitField<Type, 0, 2> {};
-  class DataField: public BitField<uint32_t, 2, 32 - 3> {};
+  class NumberInfoField : public BitField<NumberInfo::Type, 2, 3> {};
+  class DataField: public BitField<uint32_t, 5, 32 - 6> {};
 
   inline void CopyTo(Result* destination) const;
 
@@ -235,18 +240,18 @@
 
   // Predicates and accessors for the registers' reference counts.
   bool is_used(int num) { return registers_.is_used(num); }
-  bool is_used(Register reg) { return registers_.is_used(ToNumber(reg)); }
+  inline bool is_used(Register reg);
 
   int count(int num) { return registers_.count(num); }
-  int count(Register reg) { return registers_.count(ToNumber(reg)); }
+  inline int count(Register reg);
 
   // Explicitly record a reference to a register.
   void Use(int num) { registers_.Use(num); }
-  void Use(Register reg) { registers_.Use(ToNumber(reg)); }
+  inline void Use(Register reg);
 
   // Explicitly record that a register will no longer be used.
   void Unuse(int num) { registers_.Unuse(num); }
-  void Unuse(Register reg) { registers_.Unuse(ToNumber(reg)); }
+  inline void Unuse(Register reg);
 
   // Reset the register reference counts to free all non-reserved registers.
   void Reset() { registers_.Reset(); }
diff --git a/src/runtime.cc b/src/runtime.cc
index 515343b..b1d6024 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -596,8 +596,9 @@
 
   if (result.type() == CALLBACKS) {
     Object* structure = result.GetCallbackObject();
-    if (structure->IsProxy()) {
-      // Property that is internally implemented as a callback.
+    if (structure->IsProxy() || structure->IsAccessorInfo()) {
+      // Property that is internally implemented as a callback or
+      // an API defined callback.
       Object* value = obj->GetPropertyWithCallback(
           obj, structure, name, result.holder());
       elms->set(0, Heap::false_value());
@@ -609,7 +610,6 @@
       elms->set(1, FixedArray::cast(structure)->get(0));
       elms->set(2, FixedArray::cast(structure)->get(1));
     } else {
-      // TODO(ricow): Handle API callbacks.
       return Heap::undefined_value();
     }
   } else {
@@ -619,7 +619,7 @@
   }
 
   elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
-  elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
+  elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
   return *desc;
 }
 
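Slot 4 of the descriptor array now reflects DONT_DELETE rather than READ_ONLY,
so the configurable flag comes out right; a hedged sketch of the intended
surface behavior:

  var o = {};
  o.p = 42;  // plain assignment: no attribute restrictions
  var d = Object.getOwnPropertyDescriptor(o, 'p');
  // d.value === 42, d.writable === true,
  // d.enumerable === true, d.configurable === true
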
@@ -1208,17 +1208,6 @@
 }
 
 
-static Object* Runtime_TransformToFastProperties(Arguments args) {
-  HandleScope scope;
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
-  if (!object->HasFastProperties() && !object->IsGlobalObject()) {
-    TransformToFastProperties(object, 0);
-  }
-  return *object;
-}
-
-
 static Object* Runtime_RegExpExec(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 4);
@@ -2888,6 +2877,67 @@
 }
 
 
+static Object* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
+  ASSERT(args.length() == 5);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag_setter, args[2]);
+  CONVERT_CHECKED(JSFunction, fun, args[3]);
+  CONVERT_CHECKED(Smi, flag_attr, args[4]);
+  int unchecked = flag_attr->value();
+  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  RUNTIME_ASSERT(!obj->IsNull());
+  LookupResult result;
+  obj->LocalLookupRealNamedProperty(name, &result);
+
+  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+  // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION,
+  // delete it to avoid running into trouble in DefineAccessor, which
+  // handles this incorrectly if the property is read-only (it does nothing).
+  if (result.IsProperty() &&
+      (result.type() == FIELD || result.type() == NORMAL
+       || result.type() == CONSTANT_FUNCTION)) {
+    obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+  }
+  return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
+}
+
+static Object* Runtime_DefineOrRedefineDataProperty(Arguments args) {
+  ASSERT(args.length() == 4);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSObject, js_object, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  Handle<Object> obj_value = args.at<Object>(2);
+
+  CONVERT_CHECKED(Smi, flag, args[3]);
+  int unchecked = flag->value();
+  RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+
+  LookupResult result;
+  js_object->LocalLookupRealNamedProperty(*name, &result);
+
+  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+
+  // Take special care when attributes are different and there is already
+  // a property. For simplicity we normalize the property which enables us
+  // to not worry about changing the instance_descriptor and creating a new
+  // map. The current version of SetObjectProperty does not handle attributes
+  // correctly in the case where a property is a field and is reset with
+  // new attributes.
+  if (result.IsProperty() && attr != result.GetAttributes()) {
+    // New attributes - normalize to avoid writing to instance descriptor
+    js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+    // Use IgnoreAttributes version since a readonly property may be
+    // overridden and SetProperty does not allow this.
+    return js_object->IgnoreAttributesAndSetLocalProperty(*name,
+                                                          *obj_value,
+                                                          attr);
+  }
+  return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
+}
+
+
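Together these two entries let the ES5 layer change attributes that plain
SetProperty would refuse to touch; a sketch of the observable difference,
following the attribute handling described in the comment above:

  var o = {};
  Object.defineProperty(o, 'p', { value: 1, writable: false,
                                  configurable: true });
  o.p = 2;  // plain assignment is silently ignored: read-only
  // Redefinition with new attributes takes the normalize-and-set path:
  Object.defineProperty(o, 'p', { value: 2, writable: true });
  // o.p === 2 and the property is writable again
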
 Object* Runtime::SetObjectProperty(Handle<Object> object,
                                    Handle<Object> key,
                                    Handle<Object> value,
@@ -3461,17 +3511,23 @@
 
 
 static Object* Runtime_ToFastProperties(Arguments args) {
+  HandleScope scope;
+
   ASSERT(args.length() == 1);
   Handle<Object> object = args.at<Object>(0);
   if (object->IsJSObject()) {
     Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-    js_object->TransformToFastProperties(0);
+    if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
+      js_object->TransformToFastProperties(0);
+    }
   }
   return *object;
 }
 
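A hedged sketch of exercising the guarded runtime entry; this assumes a d8
shell started with --allow-natives-syntax, since %-calls are not reachable
from ordinary script:

  var o = { a: 1, b: 2 };
  delete o.b;            // deletion normalizes o into dictionary mode
  %ToFastProperties(o);  // back to fast properties; global objects are
                         // now skipped instead of being transformed
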
 
 static Object* Runtime_ToSlowProperties(Arguments args) {
+  HandleScope scope;
+
   ASSERT(args.length() == 1);
   Handle<Object> object = args.at<Object>(0);
   if (object->IsJSObject()) {
@@ -4709,41 +4765,6 @@
 }
 
 
-// The NewArguments function is only used when constructing the
-// arguments array when calling non-functions from JavaScript in
-// runtime.js:CALL_NON_FUNCTION.
-static Object* Runtime_NewArguments(Arguments args) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
-
-  // ECMA-262, 3rd., 10.1.8, p.39
-  CONVERT_CHECKED(JSFunction, callee, args[0]);
-
-  // Compute the frame holding the arguments.
-  JavaScriptFrameIterator it;
-  it.AdvanceToArgumentsFrame();
-  JavaScriptFrame* frame = it.frame();
-
-  const int length = frame->GetProvidedParametersCount();
-  Object* result = Heap::AllocateArgumentsObject(callee, length);
-  if (result->IsFailure()) return result;
-  if (length > 0) {
-    Object* obj =  Heap::AllocateFixedArray(length);
-    if (obj->IsFailure()) return obj;
-    FixedArray* array = FixedArray::cast(obj);
-    ASSERT(array->length() == length);
-
-    AssertNoAllocation no_gc;
-    WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
-    for (int i = 0; i < length; i++) {
-      array->set(i, frame->GetParameter(i), mode);
-    }
-    JSObject::cast(result)->set_elements(array);
-  }
-  return result;
-}
-
-
 static Object* Runtime_NewArgumentsFast(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
@@ -4790,21 +4811,21 @@
 }
 
 
-static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
-  // TODO(385): Change this to create a construct stub specialized for
-  // the given map to make allocation of simple objects - and maybe
-  // arrays - much faster.
-  if (FLAG_inline_new
-      && shared->has_only_simple_this_property_assignments()) {
+static Code* ComputeConstructStub(Handle<JSFunction> function) {
+  Handle<Object> prototype = Factory::null_value();
+  if (function->has_instance_prototype()) {
+    prototype = Handle<Object>(function->instance_prototype());
+  }
+  if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
     ConstructStubCompiler compiler;
-    Object* code = compiler.CompileConstructStub(*shared);
+    Object* code = compiler.CompileConstructStub(function->shared());
     if (code->IsFailure()) {
       return Builtins::builtin(Builtins::JSConstructStubGeneric);
     }
     return Code::cast(code);
   }
 
-  return shared->construct_stub();
+  return function->shared()->construct_stub();
 }
 
 
@@ -4854,10 +4875,9 @@
   bool first_allocation = !function->has_initial_map();
   Handle<JSObject> result = Factory::NewJSObject(function);
   if (first_allocation) {
-    Handle<Map> map = Handle<Map>(function->initial_map());
     Handle<Code> stub = Handle<Code>(
-        ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
-    function->shared()->set_construct_stub(*stub);
+        ComputeConstructStub(Handle<JSFunction>(function)));
+    shared->set_construct_stub(*stub);
   }
 
   Counters::constructed_objects.Increment();
@@ -4896,28 +4916,6 @@
 }
 
 
-static Object* Runtime_GetCalledFunction(Arguments args) {
-  HandleScope scope;
-  ASSERT(args.length() == 0);
-  StackFrameIterator it;
-  // Get past the JS-to-C exit frame.
-  ASSERT(it.frame()->is_exit());
-  it.Advance();
-  // Get past the CALL_NON_FUNCTION activation frame.
-  ASSERT(it.frame()->is_java_script());
-  it.Advance();
-  // Argument adaptor frames do not copy the function; we have to skip
-  // past them to get to the real calling frame.
-  if (it.frame()->is_arguments_adaptor()) it.Advance();
-  // Get the function from the top of the expression stack of the
-  // calling frame.
-  StandardFrame* frame = StandardFrame::cast(it.frame());
-  int index = frame->ComputeExpressionsCount() - 1;
-  Object* result = frame->GetExpression(index);
-  return result;
-}
-
-
 static Object* Runtime_GetFunctionDelegate(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -7922,20 +7920,22 @@
 
 static Object* Runtime_ProfilerResume(Arguments args) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 2);
 
   CONVERT_CHECKED(Smi, smi_modules, args[0]);
-  v8::V8::ResumeProfilerEx(smi_modules->value());
+  CONVERT_CHECKED(Smi, smi_tag, args[1]);
+  v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
   return Heap::undefined_value();
 }
 
 
 static Object* Runtime_ProfilerPause(Arguments args) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 2);
 
   CONVERT_CHECKED(Smi, smi_modules, args[0]);
-  v8::V8::PauseProfilerEx(smi_modules->value());
+  CONVERT_CHECKED(Smi, smi_tag, args[1]);
+  v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
   return Heap::undefined_value();
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index b2b8609..e2e5c22 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -71,10 +71,8 @@
   F(IsExtensible, 1, 1) \
   \
   /* Utilities */ \
-  F(GetCalledFunction, 0, 1) \
   F(GetFunctionDelegate, 1, 1) \
   F(GetConstructorDelegate, 1, 1) \
-  F(NewArguments, 1, 1) \
   F(NewArgumentsFast, 3, 1) \
   F(LazyCompile, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
@@ -215,6 +213,8 @@
   F(ResolvePossiblyDirectEval, 3, 2) \
   \
   F(SetProperty, -1 /* 3 or 4 */, 1) \
+  F(DefineOrRedefineDataProperty, 4, 1) \
+  F(DefineOrRedefineAccessorProperty, 5, 1) \
   F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
   \
   /* Arrays */ \
@@ -266,7 +266,6 @@
   F(InitializeConstGlobal, 2, 1) \
   F(InitializeConstContextSlot, 3, 1) \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
-  F(TransformToFastProperties, 1, 1) \
   \
   /* Debugging */ \
   F(DebugPrint, 1, 1) \
@@ -329,8 +328,8 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 #define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
-  F(ProfilerResume, 1, 1) \
-  F(ProfilerPause, 1, 1)
+  F(ProfilerResume, 2, 1) \
+  F(ProfilerPause, 2, 1)
 #else
 #define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
 #endif
diff --git a/src/runtime.js b/src/runtime.js
index c4c855e..231763c 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -178,7 +178,7 @@
       y = %_ValueOf(y);
     } else {
       y = IS_NUMBER(y)
-          ? %NumberToString(y)
+          ? %_NumberToString(y)
           : %ToString(%ToPrimitive(y, NO_HINT));
     }
   }
@@ -194,7 +194,7 @@
       x = %_ValueOf(x);
     } else {
       x = IS_NUMBER(x)
-          ? %NumberToString(x)
+          ? %_NumberToString(x)
           : %ToString(%ToPrimitive(x, NO_HINT));
     }
   }
@@ -395,26 +395,20 @@
 
 
 function CALL_NON_FUNCTION() {
-  var callee = %GetCalledFunction();
-  var delegate = %GetFunctionDelegate(callee);
+  var delegate = %GetFunctionDelegate(this);
   if (!IS_FUNCTION(delegate)) {
-    throw %MakeTypeError('called_non_callable', [typeof callee]);
+    throw %MakeTypeError('called_non_callable', [typeof this]);
   }
-
-  var parameters = %NewArguments(delegate);
-  return delegate.apply(callee, parameters);
+  return delegate.apply(this, arguments);
 }
 
 
 function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
-  var callee = %GetCalledFunction();
-  var delegate = %GetConstructorDelegate(callee);
+  var delegate = %GetConstructorDelegate(this);
   if (!IS_FUNCTION(delegate)) {
-    throw %MakeTypeError('called_non_callable', [typeof callee]);
+    throw %MakeTypeError('called_non_callable', [typeof this]);
   }
-
-  var parameters = %NewArguments(delegate);
-  return delegate.apply(callee, parameters);
+  return delegate.apply(this, arguments);
 }
 
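With the callee now arriving as the receiver, both trampolines forward the
real arguments object instead of rebuilding it through %NewArguments. For
ordinary objects, which have no call delegate, the visible result is
unchanged:

  var o = {};
  try {
    o(1, 2);  // no function delegate: TypeError 'called_non_callable'
  } catch (e) {
    // e instanceof TypeError
  }
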
 
@@ -506,6 +500,16 @@
 }
 
 
+// ECMA-262, section 9.2, page 30
+function ToBoolean(x) {
+  if (IS_BOOLEAN(x)) return x;
+  if (IS_STRING(x)) return x.length != 0;
+  if (x == null) return false;
+  if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
+  return true;
+}
+
+
 // ECMA-262, section 9.3, page 31.
 function ToNumber(x) {
   if (IS_NUMBER(x)) return x;
@@ -519,23 +523,13 @@
 // ECMA-262, section 9.8, page 35.
 function ToString(x) {
   if (IS_STRING(x)) return x;
-  if (IS_NUMBER(x)) return %NumberToString(x);
+  if (IS_NUMBER(x)) return %_NumberToString(x);
   if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
   if (IS_UNDEFINED(x)) return 'undefined';
   return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
 }
 
 
-// ... where did this come from?
-function ToBoolean(x) {
-  if (IS_BOOLEAN(x)) return x;
-  if (IS_STRING(x)) return x.length != 0;
-  if (x == null) return false;
-  if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
-  return true;
-}
-
-
 // ECMA-262, section 9.9, page 36.
 function ToObject(x) {
   if (IS_STRING(x)) return new $String(x);
@@ -569,6 +563,25 @@
 }
 
 
+// ES5, section 9.12
+function SameValue(x, y) {
+  if (typeof x != typeof y) return false;
+  if (IS_NULL_OR_UNDEFINED(x)) return true;
+  if (IS_NUMBER(x)) {
+    if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+    // x is +0 and y is -0 or vice versa
+    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
+        ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
+      return false;
+    }
+    return x == y;
+  }
+  if (IS_STRING(x)) return %StringEquals(x, y);
+  if (IS_BOOLEAN(x)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+
+  return %_ObjectEquals(x, y);
+}
+
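SameValue differs from === only on NaN and signed zero; within the natives
context:

  SameValue(NaN, NaN);  // true,  whereas NaN === NaN is false
  SameValue(0, -0);     // false, whereas 0 === -0 is true
  SameValue('a', 'a');  // true
  SameValue({}, {});    // false: distinct objects
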
 
 /* ---------------------------------
    - - -   U t i l i t i e s   - - -
diff --git a/src/simulator.h b/src/simulator.h
index 6f8cd5a..485e930 100644
--- a/src/simulator.h
+++ b/src/simulator.h
@@ -34,6 +34,8 @@
 #include "x64/simulator-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/simulator-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/string.js b/src/string.js
index ed938ec..ba01ed6 100644
--- a/src/string.js
+++ b/src/string.js
@@ -87,14 +87,18 @@
 
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
-  var len = %_ArgumentsLength() + 1;
-  var parts = new $Array(len);
-  parts[0] = IS_STRING(this) ? this : ToString(this);
-  for (var i = 1; i < len; i++) {
-    var part = %_Arguments(i - 1);
-    parts[i] = IS_STRING(part) ? part : ToString(part);
+  var len = %_ArgumentsLength();
+  var this_as_string = IS_STRING(this) ? this : ToString(this);
+  if (len === 1) {
+    return this_as_string + %_Arguments(0);
   }
-  return %StringBuilderConcat(parts, len, "");
+  var parts = new $Array(len + 1);
+  parts[0] = this_as_string;
+  for (var i = 0; i < len; i++) {
+    var part = %_Arguments(i);
+    parts[i + 1] = IS_STRING(part) ? part : ToString(part);
+  }
+  return %StringBuilderConcat(parts, len + 1, "");
 }
 
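The new single-argument path sidesteps the parts array and
%StringBuilderConcat entirely; the two shapes it distinguishes:

  'foo'.concat('bar');        // len === 1: plain string addition
  'a'.concat('b', 'c', 'd');  // general case: parts array + builder
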
 // Match ES3 and Safari
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 81f89fd..3adaa40 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -484,7 +484,10 @@
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(argc, in_loop);
-    code = compiler.CompileCallField(object, holder, index, name);
+    code = compiler.CompileCallField(JSObject::cast(object),
+                                     holder,
+                                     index,
+                                     name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
     LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -518,7 +521,9 @@
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(argc, NOT_IN_LOOP);
-    code = compiler.CompileCallInterceptor(object, holder, name);
+    code = compiler.CompileCallInterceptor(JSObject::cast(object),
+                                           holder,
+                                           name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
     LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -920,6 +925,13 @@
 }
 
 
+Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
+  JSObject* receiver = JSObject::cast(args[0]);
+  uint32_t index = Smi::cast(args[1])->value();
+  return receiver->GetElementWithInterceptor(receiver, index);
+}
+
+
 Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
@@ -1058,11 +1070,13 @@
   return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
 }
 
+
 void StubCompiler::LookupPostInterceptor(JSObject* holder,
                                          String* name,
                                          LookupResult* lookup) {
   holder->LocalLookupRealNamedProperty(name, lookup);
-  if (lookup->IsNotFound()) {
+  if (!lookup->IsProperty()) {
+    lookup->NotFound();
     Object* proto = holder->GetPrototype();
     if (proto != Heap::null_value()) {
       proto->Lookup(name, lookup);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index d97fe77..43354db 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -312,6 +312,7 @@
 Object* LoadPropertyWithInterceptorForCall(Arguments args);
 Object* StoreInterceptorProperty(Arguments args);
 Object* CallInterceptorProperty(Arguments args);
+Object* KeyedLoadPropertyWithInterceptor(Arguments args);
 
 
 // Support function for computing call IC miss stubs.
@@ -346,6 +347,7 @@
   static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                   int index,
                                                   Register prototype);
+
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
                                        Register dst, Register src,
                                        JSObject* holder, int index);
@@ -354,22 +356,20 @@
                                       Register receiver,
                                       Register scratch,
                                       Label* miss_label);
+
   static void GenerateLoadStringLength(MacroAssembler* masm,
                                        Register receiver,
-                                       Register scratch,
+                                       Register scratch1,
+                                       Register scratch2,
                                        Label* miss_label);
-  static void GenerateLoadStringLength2(MacroAssembler* masm,
-                                        Register receiver,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* miss_label);
+
   static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* miss_label);
+
   static void GenerateStoreField(MacroAssembler* masm,
-                                 Builtins::Name storage_extend,
                                  JSObject* object,
                                  int index,
                                  Map* transition,
@@ -377,16 +377,30 @@
                                  Register name_reg,
                                  Register scratch,
                                  Label* miss_label);
+
   static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
 
   // Check the integrity of the prototype chain to make sure that the
   // current IC is still valid.
+
   Register CheckPrototypes(JSObject* object,
                            Register object_reg,
                            JSObject* holder,
                            Register holder_reg,
                            Register scratch,
                            String* name,
+                           Label* miss) {
+    return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
+                           name, kInvalidProtoDepth, miss);
+  }
+
+  Register CheckPrototypes(JSObject* object,
+                           Register object_reg,
+                           JSObject* holder,
+                           Register holder_reg,
+                           Register scratch,
+                           String* name,
+                           int save_at_depth,
                            Label* miss);
 
  protected:
@@ -538,7 +552,7 @@
   explicit CallStubCompiler(int argc, InLoopFlag in_loop)
       : arguments_(argc), in_loop_(in_loop) { }
 
-  Object* CompileCallField(Object* object,
+  Object* CompileCallField(JSObject* object,
                            JSObject* holder,
                            int index,
                            String* name);
@@ -547,7 +561,7 @@
                               JSFunction* function,
                               String* name,
                               CheckType check);
-  Object* CompileCallInterceptor(Object* object,
+  Object* CompileCallInterceptor(JSObject* object,
                                  JSObject* holder,
                                  String* name);
   Object* CompileCallGlobal(JSObject* object,
diff --git a/src/top.cc b/src/top.cc
index 0274838..b9db4be 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -949,10 +949,15 @@
 }
 
 
+bool Top::CanHaveSpecialFunctions(JSObject* object) {
+  return object->IsJSArray();
+}
+
+
 Object* Top::LookupSpecialFunction(JSObject* receiver,
                                    JSObject* prototype,
                                    JSFunction* function) {
-  if (receiver->IsJSArray()) {
+  if (CanHaveSpecialFunctions(receiver)) {
     FixedArray* table = context()->global_context()->special_function_table();
     for (int index = 0; index < table->length(); index +=3) {
       if ((prototype == table->get(index)) &&
diff --git a/src/top.h b/src/top.h
index 8780844..ddc73ba 100644
--- a/src/top.h
+++ b/src/top.h
@@ -342,6 +342,7 @@
     return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
   }
 
+  static bool CanHaveSpecialFunctions(JSObject* object);
   static Object* LookupSpecialFunction(JSObject* receiver,
                                        JSObject* prototype,
                                        JSFunction* value);
diff --git a/src/utils.cc b/src/utils.cc
index 374385b..45a4cd6 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -51,43 +51,6 @@
 }
 
 
-byte* EncodeInt(byte* p, int x) {
-  while (x < -64 || x >= 64) {
-    *p++ = static_cast<byte>(x & 127);
-    x = ArithmeticShiftRight(x, 7);
-  }
-  // -64 <= x && x < 64
-  *p++ = static_cast<byte>(x + 192);
-  return p;
-}
-
-
-byte* DecodeInt(byte* p, int* x) {
-  int r = 0;
-  unsigned int s = 0;
-  byte b = *p++;
-  while (b < 128) {
-    r |= static_cast<int>(b) << s;
-    s += 7;
-    b = *p++;
-  }
-  // b >= 128
-  *x = r | ((static_cast<int>(b) - 192) << s);
-  return p;
-}
-
-
-byte* EncodeUnsignedIntBackward(byte* p, unsigned int x) {
-  while (x >= 128) {
-    *--p = static_cast<byte>(x & 127);
-    x = x >> 7;
-  }
-  // x < 128
-  *--p = static_cast<byte>(x + 128);
-  return p;
-}
-
-
 // Thomas Wang, Integer Hash Functions.
 // http://www.concentric.net/~Ttwang/tech/inthash.htm
 uint32_t ComputeIntegerHash(uint32_t key) {
diff --git a/src/utils.h b/src/utils.h
index 0fd24ec..c59ca25 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -174,48 +174,6 @@
 
 
 // ----------------------------------------------------------------------------
-// Support for compressed, machine-independent encoding
-// and decoding of integer values of arbitrary size.
-
-// Encoding and decoding from/to a buffer at position p;
-// the result is the position after the encoded integer.
-// Small signed integers in the range -64 <= x && x < 64
-// are encoded in 1 byte; larger values are encoded in 2
-// or more bytes. At most sizeof(int) + 1 bytes are used
-// in the worst case.
-byte* EncodeInt(byte* p, int x);
-byte* DecodeInt(byte* p, int* x);
-
-
-// Encoding and decoding from/to a buffer at position p - 1
-// moving backward; the result is the position of the last
-// byte written. These routines are useful to read/write
-// into a buffer starting at the end of the buffer.
-byte* EncodeUnsignedIntBackward(byte* p, unsigned int x);
-
-// The decoding function is inlined since its performance is
-// important to mark-sweep garbage collection.
-inline byte* DecodeUnsignedIntBackward(byte* p, unsigned int* x) {
-  byte b = *--p;
-  if (b >= 128) {
-    *x = static_cast<unsigned int>(b) - 128;
-    return p;
-  }
-  unsigned int r = static_cast<unsigned int>(b);
-  unsigned int s = 7;
-  b = *--p;
-  while (b < 128) {
-    r |= static_cast<unsigned int>(b) << s;
-    s += 7;
-    b = *--p;
-  }
-  // b >= 128
-  *x = r | ((static_cast<unsigned int>(b) - 128) << s);
-  return p;
-}
-
-
-// ----------------------------------------------------------------------------
 // Hash function.
 
 uint32_t ComputeIntegerHash(uint32_t key);
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 7397c30..eaac2db 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -100,70 +100,76 @@
   SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
 
 
-#define STATS_COUNTER_LIST_2(SC)                                    \
-  /* Number of code stubs. */                                       \
-  SC(code_stubs, V8.CodeStubs)                                      \
-  /* Amount of stub code. */                                        \
-  SC(total_stubs_code_size, V8.TotalStubsCodeSize)                  \
-  /* Amount of (JS) compiled code. */                               \
-  SC(total_compiled_code_size, V8.TotalCompiledCodeSize)            \
-  SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest) \
-  SC(gc_compactor_caused_by_promoted_data,                          \
-     V8.GCCompactorCausedByPromotedData)                            \
-  SC(gc_compactor_caused_by_oldspace_exhaustion,                    \
-     V8.GCCompactorCausedByOldspaceExhaustion)                      \
-  SC(gc_compactor_caused_by_weak_handles,                           \
-     V8.GCCompactorCausedByWeakHandles)                             \
-  SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                 \
-  SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)       \
-  /* How is the generic keyed-load stub used? */                    \
-  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                \
-  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)          \
-  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)              \
-  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
-  /* Count how much the monomorphic keyed-load stubs are hit. */    \
-  SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)  \
-  SC(keyed_load_string_length, V8.KeyedLoadStringLength)            \
-  SC(keyed_load_array_length, V8.KeyedLoadArrayLength)              \
-  SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction)    \
-  SC(keyed_load_field, V8.KeyedLoadField)                           \
-  SC(keyed_load_callback, V8.KeyedLoadCallback)                     \
-  SC(keyed_load_interceptor, V8.KeyedLoadInterceptor)               \
-  SC(keyed_load_inline, V8.KeyedLoadInline)                         \
-  SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss)                \
-  SC(named_load_inline, V8.NamedLoadInline)                         \
-  SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                \
-  SC(named_load_global_inline, V8.NamedLoadGlobalInline)            \
-  SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)   \
-  SC(keyed_store_field, V8.KeyedStoreField)                         \
-  SC(keyed_store_inline, V8.KeyedStoreInline)                       \
-  SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)              \
-  SC(named_store_global_inline, V8.NamedStoreGlobalInline)          \
-  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
-  SC(call_global_inline, V8.CallGlobalInline)                       \
-  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)              \
-  SC(constructed_objects, V8.ConstructedObjects)                    \
-  SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)     \
-  SC(constructed_objects_stub, V8.ConstructedObjectsStub)           \
-  SC(array_function_runtime, V8.ArrayFunctionRuntime)               \
-  SC(array_function_native, V8.ArrayFunctionNative)                 \
-  SC(for_in, V8.ForIn)                                              \
-  SC(enum_cache_hits, V8.EnumCacheHits)                             \
-  SC(enum_cache_misses, V8.EnumCacheMisses)                         \
-  SC(reloc_info_count, V8.RelocInfoCount)                           \
-  SC(reloc_info_size, V8.RelocInfoSize)                             \
-  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                       \
-  SC(compute_entry_frame, V8.ComputeEntryFrame)                     \
-  SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)          \
-  SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
-  SC(string_add_runtime, V8.StringAddRuntime)                       \
-  SC(string_add_native, V8.StringAddNative)                         \
-  SC(sub_string_runtime, V8.SubStringRuntime)                       \
-  SC(sub_string_native, V8.SubStringNative)                         \
-  SC(string_compare_native, V8.StringCompareNative)                 \
-  SC(string_compare_runtime, V8.StringCompareRuntime)               \
-  SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                   \
-  SC(regexp_entry_native, V8.RegExpEntryNative)
+#define STATS_COUNTER_LIST_2(SC)                                      \
+  /* Number of code stubs. */                                         \
+  SC(code_stubs, V8.CodeStubs)                                        \
+  /* Amount of stub code. */                                          \
+  SC(total_stubs_code_size, V8.TotalStubsCodeSize)                    \
+  /* Amount of (JS) compiled code. */                                 \
+  SC(total_compiled_code_size, V8.TotalCompiledCodeSize)              \
+  SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest)   \
+  SC(gc_compactor_caused_by_promoted_data,                            \
+     V8.GCCompactorCausedByPromotedData)                              \
+  SC(gc_compactor_caused_by_oldspace_exhaustion,                      \
+     V8.GCCompactorCausedByOldspaceExhaustion)                        \
+  SC(gc_compactor_caused_by_weak_handles,                             \
+     V8.GCCompactorCausedByWeakHandles)                               \
+  SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
+  SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
+  /* How is the generic keyed-load stub used? */                      \
+  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
+  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
+  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                \
+  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)   \
+  /* Count how much the monomorphic keyed-load stubs are hit. */      \
+  SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)    \
+  SC(keyed_load_string_length, V8.KeyedLoadStringLength)              \
+  SC(keyed_load_array_length, V8.KeyedLoadArrayLength)                \
+  SC(keyed_load_constant_function, V8.KeyedLoadConstantFunction)      \
+  SC(keyed_load_field, V8.KeyedLoadField)                             \
+  SC(keyed_load_callback, V8.KeyedLoadCallback)                       \
+  SC(keyed_load_interceptor, V8.KeyedLoadInterceptor)                 \
+  SC(keyed_load_inline, V8.KeyedLoadInline)                           \
+  SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss)                  \
+  SC(named_load_inline, V8.NamedLoadInline)                           \
+  SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                  \
+  SC(named_load_global_inline, V8.NamedLoadGlobalInline)              \
+  SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)     \
+  SC(keyed_store_field, V8.KeyedStoreField)                           \
+  SC(keyed_store_inline, V8.KeyedStoreInline)                         \
+  SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
+  SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
+  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
+  SC(call_const, V8.CallConst)                                        \
+  SC(call_const_fast_api, V8.CallConstFastApi)                        \
+  SC(call_const_interceptor, V8.CallConstInterceptor)                 \
+  SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
+  SC(call_global_inline, V8.CallGlobalInline)                         \
+  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)                \
+  SC(constructed_objects, V8.ConstructedObjects)                      \
+  SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)       \
+  SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
+  SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
+  SC(array_function_native, V8.ArrayFunctionNative)                   \
+  SC(for_in, V8.ForIn)                                                \
+  SC(enum_cache_hits, V8.EnumCacheHits)                               \
+  SC(enum_cache_misses, V8.EnumCacheMisses)                           \
+  SC(reloc_info_count, V8.RelocInfoCount)                             \
+  SC(reloc_info_size, V8.RelocInfoSize)                               \
+  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                         \
+  SC(compute_entry_frame, V8.ComputeEntryFrame)                       \
+  SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)            \
+  SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)   \
+  SC(string_add_runtime, V8.StringAddRuntime)                         \
+  SC(string_add_native, V8.StringAddNative)                           \
+  SC(sub_string_runtime, V8.SubStringRuntime)                         \
+  SC(sub_string_native, V8.SubStringNative)                           \
+  SC(string_compare_native, V8.StringCompareNative)                   \
+  SC(string_compare_runtime, V8.StringCompareRuntime)                 \
+  SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                     \
+  SC(regexp_entry_native, V8.RegExpEntryNative)                       \
+  SC(number_to_string_native, V8.NumberToStringNative)                \
+  SC(number_to_string_runtime, V8.NumberToStringRuntime)
 
 // This file contains all the v8 counters that are in use.
 class Counters : AllStatic {
diff --git a/src/v8.cc b/src/v8.cc
index 3bec827..3953361 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -114,8 +114,11 @@
 
   OProfileAgent::Initialize();
 
-  if (FLAG_log_code) {
+  // If we are deserializing, log non-function code objects and compiled
+  // functions found in the snapshot.
+  if (des != NULL && FLAG_log_code) {
     HandleScope scope;
+    LOG(LogCodeObjects());
     LOG(LogCompiledFunctions());
   }
 
diff --git a/src/v8natives.js b/src/v8natives.js
index 7475065..6a32d7b 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -56,7 +56,7 @@
     %FunctionSetName(f, key);
     %SetProperty(object, key, f, attributes);
   }
-  %TransformToFastProperties(object);
+  %ToFastProperties(object);
 }
 
 // Emulates JSC by installing functions on a hidden prototype that
@@ -307,7 +307,7 @@
 
 // ES5 8.10.4
 function FromPropertyDescriptor(desc) {
-  if(IS_UNDEFINED(desc)) return desc;
+  if (IS_UNDEFINED(desc)) return desc;
   var obj = new $Object();
   if (IsDataDescriptor(desc)) {
     obj.value = desc.getValue();
@@ -333,7 +333,6 @@
     desc.setEnumerable(ToBoolean(obj.enumerable));
   }
 
-
   if ("configurable" in obj) {
     desc.setConfigurable(ToBoolean(obj.configurable));
   }
@@ -377,7 +376,9 @@
   this.writable_ = false;
   this.hasWritable_ = false;
   this.enumerable_ = false;
+  this.hasEnumerable_ = false;
   this.configurable_ = false;
+  this.hasConfigurable_ = false;
   this.get_ = void 0;
   this.hasGetter_ = false;
   this.set_ = void 0;
@@ -396,8 +397,14 @@
 }
 
 
+PropertyDescriptor.prototype.hasValue = function() {
+  return this.hasValue_;
+}
+
+
 PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
   this.enumerable_ = enumerable;
+  this.hasEnumerable_ = true;
 }
 
 
@@ -406,6 +413,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasEnumerable = function() {
+  return this.hasEnumerable_;
+}
+
+
 PropertyDescriptor.prototype.setWritable = function(writable) {
   this.writable_ = writable;
   this.hasWritable_ = true;
@@ -419,6 +431,12 @@
 
 PropertyDescriptor.prototype.setConfigurable = function(configurable) {
   this.configurable_ = configurable;
+  this.hasConfigurable_ = true;
+}
+
+
+PropertyDescriptor.prototype.hasConfigurable = function() {
+  return this.hasConfigurable_;
 }
 
 
@@ -438,6 +456,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasGetter = function() {
+  return this.hasGetter_;
+}
+
+
 PropertyDescriptor.prototype.setSet = function(set) {
   this.set_ = set;
   this.hasSetter_ = true;
@@ -449,6 +472,12 @@
 }
 
 
+PropertyDescriptor.prototype.hasSetter = function() {
+  return this.hasSetter_;
+}
+
+
+
 // ES5 section 8.12.1.
 function GetOwnProperty(obj, p) {
   var desc = new PropertyDescriptor();
@@ -458,8 +487,7 @@
   //  obj is an accessor [true, Get, Set, Enumerable, Configurable]
   var props = %GetOwnProperty(ToObject(obj), ToString(p));
 
-  if (IS_UNDEFINED(props))
-    return void 0;
+  if (IS_UNDEFINED(props)) return void 0;
 
   // This is an accessor
   if (props[0]) {
@@ -476,16 +504,89 @@
 }
 
 
-// ES5 8.12.9.  This version cannot cope with the property p already
-// being present on obj.
+// ES5 section 8.12.2.
+function GetProperty(obj, p) {
+  var prop = GetOwnProperty(obj, p);
+  if (!IS_UNDEFINED(prop)) return prop;
+  var proto = obj.__proto__;
+  if (IS_NULL(proto)) return void 0;
+  return GetProperty(proto, p);
+}
+
+
+// ES5 section 8.12.6
+function HasProperty(obj, p) {
+  var desc = GetProperty(obj, p);
+  return IS_UNDEFINED(desc) ? false : true;
+}
+
+
+// ES5 section 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
-  var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
-  if (IsDataDescriptor(desc)) {
-    flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
-    %SetProperty(obj, p, desc.getValue(), flag);
+  var current = GetOwnProperty(obj, p);
+  var extensible = %IsExtensible(ToObject(obj));
+
+  // Error handling according to spec.
+  // Step 3
+  if (IS_UNDEFINED(current) && !extensible)
+    throw MakeTypeError("define_disallowed", ["defineProperty"]);
+
+  if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+    // Step 7
+    if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    // Step 9
+    if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+      throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    // Step 10
+    if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
+      if (!current.isWritable() && desc.isWritable())
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      if (!current.isWritable() && desc.hasValue() &&
+          !SameValue(desc.getValue(), current.getValue())) {
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      }
+    }
+    // Step 11
+    if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
+      if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+      }
+      if (desc.hasGetter() && !SameValue(desc.getGet(), current.getGet()))
+        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+    }
+  }
+
+  // Send flags. Enumerable and configurable are common; writable is only
+  // sent for the data descriptor.
+  // Take special care if enumerable and configurable are not defined on
+  // desc (we need to preserve the existing values from current).
+  var flag = NONE;
+  if (desc.hasEnumerable()) {
+    flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
+  } else if (!IS_UNDEFINED(current)) {
+    flag |= current.isEnumerable() ? 0 : DONT_ENUM;
   } else {
-    if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
-    if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+    flag |= DONT_ENUM;
+  }
+
+  if (desc.hasConfigurable()) {
+    flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
+  } else if (!IS_UNDEFINED(current)) {
+    flag |= current.isConfigurable() ? 0 : DONT_DELETE;
+  } else {
+    flag |= DONT_DELETE;
+  }
+
+  if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
+    flag |= desc.isWritable() ? 0 : READ_ONLY;
+    %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
+  } else {
+    if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
+       %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+    }
+    if (desc.hasSetter() && IS_FUNCTION(desc.getSet())) {
+      %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
+    }
   }
   return true;
 }
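
A minimal sketch of the observable ES5 semantics implemented by the
DefineOwnProperty code above (the property names are arbitrary examples):

  var o = {};
  Object.defineProperty(o, "x", {value: 1, writable: false, configurable: false});
  o.x = 2;  // Silently ignored: "x" is non-writable.
  try {
    // Step 10 above: changing the value of a non-writable, non-configurable
    // data property is a disallowed redefinition.
    Object.defineProperty(o, "x", {value: 3});
  } catch (e) {
    // TypeError ("redefine_disallowed").
  }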
@@ -522,9 +623,8 @@
   if (%GetInterceptorInfo(obj) & 1) {
     var indexedInterceptorNames =
         %GetIndexedInterceptorElementNames(obj);
-    if (indexedInterceptorNames) {
+    if (indexedInterceptorNames)
       propertyNames = propertyNames.concat(indexedInterceptorNames);
-    }
   }
 
   // Find all the named properties.
@@ -542,6 +642,10 @@
     }
   }
 
+  // Property names are expected to be strings.
+  for (var i = 0; i < propertyNames.length; ++i)
+    propertyNames[i] = ToString(propertyNames[i]);
+
   return propertyNames;
 }
 
@@ -558,10 +662,21 @@
 }
 
 
-// ES5 section 15.2.3.7.  This version cannot cope with the properies already
-// being present on obj.  Therefore it is not exposed as
-// Object.defineProperties yet.
+// ES5 section 15.2.3.6.
+function ObjectDefineProperty(obj, p, attributes) {
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+  var name = ToString(p);
+  var desc = ToPropertyDescriptor(attributes);
+  DefineOwnProperty(obj, name, desc, true);
+  return obj;
+}
+
+
+// ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
   var key_values = [];
   for (var key in props) {
@@ -577,6 +692,7 @@
     var desc = key_values[i + 1];
     DefineOwnProperty(obj, key, desc, true);
   }
+  return obj;
 }
 
 
@@ -611,6 +727,8 @@
   InstallFunctions($Object, DONT_ENUM, $Array(
     "keys", ObjectKeys,
     "create", ObjectCreate,
+    "defineProperty", ObjectDefineProperty,
+    "defineProperties", ObjectDefineProperties,
     "getPrototypeOf", ObjectGetPrototypeOf,
     "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
     "getOwnPropertyNames", ObjectGetOwnPropertyNames
@@ -796,7 +914,7 @@
                "POSITIVE_INFINITY",
                1/0,
                DONT_ENUM | DONT_DELETE | READ_ONLY);
-  %TransformToFastProperties($Number);
+  %ToFastProperties($Number);
 
   // Setup non-enumerable functions on the Number prototype object.
   InstallFunctions($Number.prototype, DONT_ENUM, $Array(
diff --git a/src/version.cc b/src/version.cc
index 67e1e02..6501afb 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     1
-#define BUILD_NUMBER      0
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      1
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
index 44e5fae..3624e25 100644
--- a/src/virtual-frame.cc
+++ b/src/virtual-frame.cc
@@ -48,7 +48,13 @@
 }
 
 
-FrameElement VirtualFrame::CopyElementAt(int index) {
+// Create a duplicate of an existing valid frame element.
+// We can pass an optional number type information that will override the
+// existing information about the backing element. The new information must
+// not conflict with the existing type information and must be equally or
+// more precise. The default parameter value kUninitialized means that there
+// is no additional information.
+FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo::Type info) {
   ASSERT(index >= 0);
   ASSERT(index < element_count());
 
@@ -71,15 +77,26 @@
       // Fall through.
 
     case FrameElement::MEMORY:  // Fall through.
-    case FrameElement::REGISTER:
+    case FrameElement::REGISTER: {
       // All copies are backed by memory or register locations.
       result.set_type(FrameElement::COPY);
       result.clear_copied();
       result.clear_sync();
       result.set_index(index);
       elements_[index].set_copied();
+      // Update backing element's number information.
+      NumberInfo::Type existing = elements_[index].number_info();
+      ASSERT(existing != NumberInfo::kUninitialized);
+      // Assert that the new type information (a) does not conflict with the
+      // existing one and (b) is equally or more precise.
+      ASSERT((info == NumberInfo::kUninitialized) ||
+             (existing | info) != NumberInfo::kUninitialized);
+      ASSERT(existing <= info);
+      elements_[index].set_number_info(info != NumberInfo::kUninitialized
+                                       ? info
+                                       : existing);
       break;
-
+    }
     case FrameElement::INVALID:
       // We should not try to copy invalid elements.
       UNREACHABLE();
@@ -98,7 +115,7 @@
   ASSERT(stack_pointer_ == element_count() - 1);
 
   for (int i = 0; i < count; i++) {
-    elements_.Add(FrameElement::MemoryElement());
+    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
   }
   stack_pointer_ += count;
 }
@@ -144,8 +161,16 @@
   if (!elements_[index].is_valid()) return;
 
   SyncElementAt(index);
+  // Number type information is preserved.
+  // Copies get their number information from their backing element.
+  NumberInfo::Type info;
+  if (!elements_[index].is_copy()) {
+    info = elements_[index].number_info();
+  } else {
+    info = elements_[elements_[index].index()].number_info();
+  }
   // The element is now in memory.  Its copied flag is preserved.
-  FrameElement new_element = FrameElement::MemoryElement();
+  FrameElement new_element = FrameElement::MemoryElement(info);
   if (elements_[index].is_copied()) {
     new_element.set_copied();
   }
@@ -268,7 +293,6 @@
 
   InvalidateFrameSlotAt(frame_index);
 
-  FrameElement new_element;
   if (value->is_register()) {
     if (is_used(value->reg())) {
       // The register already appears on the frame.  Either the existing
@@ -301,7 +325,8 @@
       Use(value->reg(), frame_index);
       elements_[frame_index] =
           FrameElement::RegisterElement(value->reg(),
-                                        FrameElement::NOT_SYNCED);
+                                        FrameElement::NOT_SYNCED,
+                                        value->number_info());
     }
   } else {
     ASSERT(value->is_constant());
@@ -318,16 +343,15 @@
 }
 
 
-void VirtualFrame::Push(Register reg) {
+void VirtualFrame::Push(Register reg, NumberInfo::Type info) {
   if (is_used(reg)) {
     int index = register_location(reg);
-    FrameElement element = CopyElementAt(index);
+    FrameElement element = CopyElementAt(index, info);
     elements_.Add(element);
   } else {
     Use(reg, element_count());
     FrameElement element =
-        FrameElement::RegisterElement(reg,
-                                      FrameElement::NOT_SYNCED);
+        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
     elements_.Add(element);
   }
 }
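
The NumberInfo threading above lets a copy of a frame element carry, and even
refine, the type information of its backing element. A hedged sketch of the
kind of JavaScript that benefits, assuming the loop counter stays in a
register:

  function sum(a, n) {
    var total = 0;                 // Constant smi.
    for (var i = 0; i < n; i++) {  // i is initialized to a smi constant and only
      total += a[i];               // incremented, so copies of its frame element
    }                              // can be pushed with smi type information.
    return total;
  }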
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
index 0bf0ca2..220823e 100644
--- a/src/virtual-frame.h
+++ b/src/virtual-frame.h
@@ -37,6 +37,8 @@
 #include "x64/virtual-frame-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/virtual-frame-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/virtual-frame-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9cfe98a..1f97235 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -224,7 +224,7 @@
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
-Operand::Operand(Register base, int32_t disp): rex_(0) {
+Operand::Operand(Register base, int32_t disp) : rex_(0) {
   len_ = 1;
   if (base.is(rsp) || base.is(r12)) {
     // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
@@ -246,7 +246,7 @@
 Operand::Operand(Register base,
                  Register index,
                  ScaleFactor scale,
-                 int32_t disp): rex_(0) {
+                 int32_t disp) : rex_(0) {
   ASSERT(!index.is(rsp));
   len_ = 1;
   set_sib(scale, index, base);
@@ -264,8 +264,19 @@
 }
 
 
+Operand::Operand(Register index,
+                 ScaleFactor scale,
+                 int32_t disp) : rex_(0) {
+  ASSERT(!index.is(rsp));
+  len_ = 1;
+  set_modrm(0, rsp);
+  set_sib(scale, index, rbp);
+  set_disp32(disp);
+}
+
+
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
 
 #ifdef GENERATED_CODE_COVERAGE
 static void InitCoverageLog();
@@ -276,7 +287,7 @@
 Assembler::Assembler(void* buffer, int buffer_size)
     : code_targets_(100) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
@@ -293,7 +304,7 @@
     buffer_size_ = buffer_size;
     own_buffer_ = true;
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
@@ -309,7 +320,7 @@
   }
 #endif
 
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -337,11 +348,10 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  // finalize code
-  // (at this point overflow() may be true, but the gap ensures that
-  // we are still not overlapping instructions and relocation info)
-  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
-  // setup desc
+  // Finalize code (at this point overflow() may be true, but the gap ensures
+  // that we are still not overlapping instructions and relocation info).
+  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -370,7 +380,7 @@
     int current = L->pos();
     int next = long_at(current);
     while (next != current) {
-      // relative address, relative to point after address
+      // Relative address, relative to point after address.
       int imm32 = pos - (current + sizeof(int32_t));
       long_at_put(current, imm32);
       current = next;
@@ -390,10 +400,10 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(buffer_overflow());  // should not call this otherwise
+  ASSERT(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // The new buffer.
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -407,7 +417,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size =
@@ -419,7 +429,7 @@
   memset(desc.buffer, 0xCC, desc.buffer_size);
 #endif
 
-  // copy the data
+  // Copy the data.
   intptr_t pc_delta = desc.buffer - buffer_;
   intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
       (buffer_ + buffer_size_);
@@ -427,7 +437,7 @@
   memmove(rc_delta + reloc_info_writer.pos(),
           reloc_info_writer.pos(), desc.reloc_size);
 
-  // switch buffers
+  // Switch buffers.
   if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
     spare_buffer_ = buffer_;
   } else {
@@ -442,7 +452,7 @@
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
-  // relocate runtime entries
+  // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
     if (rmode == RelocInfo::INTERNAL_REFERENCE) {
@@ -472,7 +482,7 @@
 }
 
 
-// Assembler Instruction implementations
+// Assembler Instruction implementations.
 
 void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
@@ -756,7 +766,7 @@
 void Assembler::call(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1000 #32-bit disp
+  // 1110 1000 #32-bit disp.
   emit(0xE8);
   if (L->is_bound()) {
     int offset = L->pos() - pc_offset() - sizeof(int32_t);
@@ -777,7 +787,7 @@
 void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1000 #32-bit disp
+  // 1110 1000 #32-bit disp.
   emit(0xE8);
   emit_code_target(target, rmode);
 }
@@ -786,7 +796,7 @@
 void Assembler::call(Register adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: FF /2 r64
+  // Opcode: FF /2 r64.
   if (adr.high_bit()) {
     emit_rex_64(adr);
   }
@@ -798,7 +808,7 @@
 void Assembler::call(const Operand& op) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: FF /2 m64
+  // Opcode: FF /2 m64.
   emit_rex_64(op);
   emit(0xFF);
   emit_operand(2, op);
@@ -829,7 +839,7 @@
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: REX.W 0f 40 + cc /r
+  // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -846,7 +856,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: REX.W 0f 40 + cc /r
+  // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -863,7 +873,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -880,7 +890,7 @@
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode: 0f 40 + cc /r
+  // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
   emit(0x40 + cc);
@@ -1110,17 +1120,17 @@
     int offs = L->pos() - pc_offset();
     ASSERT(offs <= 0);
     if (is_int8(offs - short_size)) {
-      // 0111 tttn #8-bit disp
+      // 0111 tttn #8-bit disp.
       emit(0x70 | cc);
       emit((offs - short_size) & 0xFF);
     } else {
-      // 0000 1111 1000 tttn #32-bit disp
+      // 0000 1111 1000 tttn #32-bit disp.
       emit(0x0F);
       emit(0x80 | cc);
       emitl(offs - long_size);
     }
   } else if (L->is_linked()) {
-    // 0000 1111 1000 tttn #32-bit disp
+    // 0000 1111 1000 tttn #32-bit disp.
     emit(0x0F);
     emit(0x80 | cc);
     emitl(L->pos());
@@ -1142,7 +1152,7 @@
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
-  // 0000 1111 1000 tttn #32-bit disp
+  // 0000 1111 1000 tttn #32-bit disp.
   emit(0x0F);
   emit(0x80 | cc);
   emit_code_target(target, rmode);
@@ -1156,21 +1166,21 @@
     int offs = L->pos() - pc_offset() - 1;
     ASSERT(offs <= 0);
     if (is_int8(offs - sizeof(int8_t))) {
-      // 1110 1011 #8-bit disp
+      // 1110 1011 #8-bit disp.
       emit(0xEB);
       emit((offs - sizeof(int8_t)) & 0xFF);
     } else {
-      // 1110 1001 #32-bit disp
+      // 1110 1001 #32-bit disp.
       emit(0xE9);
       emitl(offs - sizeof(int32_t));
     }
   } else  if (L->is_linked()) {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     emit(0xE9);
     emitl(L->pos());
     L->link_to(pc_offset() - sizeof(int32_t));
   } else {
-    // 1110 1001 #32-bit disp
+    // 1110 1001 #32-bit disp.
     ASSERT(L->is_unused());
     emit(0xE9);
     int32_t current = pc_offset();
@@ -1183,7 +1193,7 @@
 void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // 1110 1001 #32-bit disp
+  // 1110 1001 #32-bit disp.
   emit(0xE9);
   emit_code_target(target, rmode);
 }
@@ -1192,7 +1202,7 @@
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode FF/4 r64
+  // Opcode FF/4 r64.
   if (target.high_bit()) {
     emit_rex_64(target);
   }
@@ -1204,7 +1214,7 @@
 void Assembler::jmp(const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  // Opcode FF/4 m64
+  // Opcode FF/4 m64.
   emit_optional_rex_32(src);
   emit(0xFF);
   emit_operand(0x4, src);
@@ -1413,10 +1423,8 @@
 }
 
 
-/*
- * Loads the ip-relative location of the src label into the target
- * location (as a 32-bit offset sign extended to 64-bit).
- */
+// Loads the ip-relative location of the src label into the target location
+// (as a 32-bit offset sign extended to 64-bit).
 void Assembler::movl(const Operand& dst, Label* src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2006,7 +2014,7 @@
 }
 
 
-// FPU instructions
+// FPU instructions.
 
 
 void Assembler::fld(int i) {
@@ -2377,7 +2385,7 @@
   emit(b2 + i);
 }
 
-// SSE 2 operations
+// SSE 2 operations.
 
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
@@ -2511,6 +2519,38 @@
 }
 
 
+void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x2f);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x2e);
+  emit_sse_operand(dst, src);
+}
+
 
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
@@ -2527,7 +2567,7 @@
 }
 
 
-// Relocation information implementations
+// Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   ASSERT(rmode != RelocInfo::NONE);
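
The new comisd/ucomisd encodings back the inlined double comparison added to
codegen-x64.cc below; ucomisd sets the parity flag for unordered operands,
which is why that code bails out to the stub on parity_even. The JavaScript
semantics being preserved:

  var x = NaN;
  x < 10;   // false
  x >= 10;  // false: every ordered comparison involving NaN is false, so the
            // unordered (parity) case cannot reuse the normal condition codes.
  x == x;   // false as well.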
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 3f2aef0..6c6f6a3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -113,8 +113,8 @@
     return code_ & 0x7;
   }
 
-  // (unfortunately we can't make this private in a struct when initializing
-  // by assignment.)
+  // Unfortunately we can't make this private in a struct when initializing
+  // by assignment.
   int code_;
 };
 
@@ -308,7 +308,6 @@
   times_4 = 2,
   times_8 = 3,
   times_int_size = times_4,
-  times_half_pointer_size = times_4,
   times_pointer_size = times_8
 };
 
@@ -1122,6 +1121,10 @@
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
 
+  void xorpd(XMMRegister dst, XMMRegister src);
+
+  void comisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, XMMRegister src);
 
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
@@ -1168,14 +1171,6 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
- protected:
-  // void movsd(XMMRegister dst, const Operand& src);
-  // void movsd(const Operand& dst, XMMRegister src);
-
-  // void emit_sse_operand(XMMRegister reg, const Operand& adr);
-  // void emit_sse_operand(XMMRegister dst, XMMRegister src);
-
-
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
   byte byte_at(int pos)  { return buffer_[pos]; }
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 0b95bba..b3c5e33 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -185,14 +185,14 @@
 
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // Stack Layout:
-  // rsp: return address
-  //  +1: Argument n
-  //  +2: Argument n-1
+  // rsp[0]:   Return address
+  // rsp[1]:   Argument n
+  // rsp[2]:   Argument n-1
   //  ...
-  //  +n: Argument 1 = receiver
-  //  +n+1: Argument 0 = function to call
+  // rsp[n]:   Argument 1
+  // rsp[n+1]: Receiver (function to call)
   //
-  // rax contains the number of arguments, n, not counting the function.
+  // rax contains the number of arguments, n, not counting the receiver.
   //
   // 1. Make sure we have at least one argument.
   { Label done;
@@ -205,31 +205,23 @@
     __ bind(&done);
   }
 
-  // 2. Get the function to call from the stack.
-  { Label done, non_function, function;
-    // The function to call is at position n+1 on the stack.
-    __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
-    __ JumpIfSmi(rdi, &non_function);
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-    __ j(equal, &function);
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  Label non_function;
+  // The function to call is at position n+1 on the stack.
+  __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+  __ JumpIfSmi(rdi, &non_function);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &non_function);
 
-    // Non-function called: Clear the function to force exception.
-    __ bind(&non_function);
-    __ xor_(rdi, rdi);
-    __ jmp(&done);
-
-    // Function called: Change context eagerly to get the right global object.
-    __ bind(&function);
+  // 3a. Patch the first argument if necessary when calling a function.
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
     __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-    __ bind(&done);
-  }
-
-  // 3. Make sure first argument is an object; convert if necessary.
-  { Label call_to_object, use_global_receiver, patch_receiver, done;
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-
-    __ JumpIfSmi(rbx, &call_to_object);
+    __ JumpIfSmi(rbx, &convert_to_object);
 
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);
@@ -237,31 +229,28 @@
     __ j(equal, &use_global_receiver);
 
     __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &call_to_object);
+    __ j(below, &convert_to_object);
     __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-    __ j(below_equal, &done);
+    __ j(below_equal, &shift_arguments);
 
-    __ bind(&call_to_object);
-    __ EnterInternalFrame();  // preserves rax, rbx, rdi
-
-    // Store the arguments count on the stack (smi tagged).
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
     __ Integer32ToSmi(rax, rax);
     __ push(rax);
 
-    __ push(rdi);  // save edi across the call
     __ push(rbx);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ movq(rbx, rax);
-    __ pop(rdi);  // restore edi after the call
 
-    // Get the arguments count and untag it.
     __ pop(rax);
     __ SmiToInteger32(rax, rax);
-
     __ LeaveInternalFrame();
+    // Restore the function to rdi.
+    __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
     __ jmp(&patch_receiver);
 
-    // Use the global receiver object from the called function as the receiver.
+    // Use the global receiver object from the called function as the
+    // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -273,48 +262,57 @@
     __ bind(&patch_receiver);
     __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
 
-    __ bind(&done);
+    __ jmp(&shift_arguments);
   }
 
-  // 4. Shift stuff one slot down the stack.
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  __ bind(&non_function);
+  __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+  __ xor_(rdi, rdi);
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  __ bind(&shift_arguments);
   { Label loop;
-    __ lea(rcx, Operand(rax, +1));  // +1 ~ copy receiver too
+    __ movq(rcx, rax);
     __ bind(&loop);
     __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
     __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
     __ decq(rcx);
-    __ j(not_zero, &loop);
+    __ j(not_sign, &loop);  // While non-negative (to copy return address).
+    __ pop(rbx);  // Discard copy of return address.
+    __ decq(rax);  // One fewer argument (first argument is new receiver).
   }
 
-  // 5. Remove TOS (copy of last arguments), but keep return address.
-  __ pop(rbx);
-  __ pop(rcx);
-  __ push(rbx);
-  __ decq(rax);
-
-  // 6. Check that function really was a function and get the code to
-  //    call from the function and check that the number of expected
-  //    arguments matches what we're providing.
-  { Label invoke, trampoline;
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  { Label function;
     __ testq(rdi, rdi);
-    __ j(not_zero, &invoke);
+    __ j(not_zero, &function);
     __ xor_(rbx, rbx);
     __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
-    __ bind(&trampoline);
     __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
             RelocInfo::CODE_TARGET);
-
-    __ bind(&invoke);
-    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ movsxlq(rbx,
-           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
-    __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
-    __ cmpq(rax, rbx);
-    __ j(not_equal, &trampoline);
+    __ bind(&function);
   }
 
-  // 7. Jump (tail-call) to the code in register edx without checking arguments.
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register rdx without checking arguments.
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movsxlq(rbx,
+         FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+  __ cmpq(rax, rbx);
+  __ j(not_equal,
+       Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+       RelocInfo::CODE_TARGET);
+
   ParameterCount expected(0);
   __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
 }
@@ -586,6 +584,7 @@
                        JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check whether an empty sized array is requested.
+  __ SmiToInteger64(array_size, array_size);
   __ testq(array_size, array_size);
   __ j(not_zero, &not_empty);
 
@@ -605,7 +604,7 @@
   __ bind(&not_empty);
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,  // array_size is a smi.
+                        times_pointer_size,
                         array_size,
                         result,
                         elements_array_end,
@@ -618,19 +617,20 @@
   // result: JSObject
   // elements_array: initial map
   // elements_array_end: start of next object
-  // array_size: size of array (smi)
+  // array_size: size of array
   __ bind(&allocated);
   __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
   __ Move(elements_array, Factory::empty_fixed_array());
   __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
   // Field JSArray::kElementsOffset is initialized later.
-  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+  __ Integer32ToSmi(scratch, array_size);
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
 
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
   // elements_array_end: start of next object
-  // array_size: size of array (smi)
+  // array_size: size of array
   __ lea(elements_array, Operand(result, JSArray::kSize));
   __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
 
@@ -638,9 +638,8 @@
   // result: JSObject
   // elements_array: elements array
   // elements_array_end: start of next object
-  // array_size: size of array (smi)
+  // array_size: size of array
   ASSERT(kSmiTag == 0);
-  __ SmiToInteger64(array_size, array_size);
   __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
           Factory::fixed_array_map());
   Label not_empty_2, fill_array;
@@ -900,7 +899,11 @@
   // rdi: called object
   // rax: number of arguments
   __ bind(&non_function_call);
-  // Set expected number of arguments to zero (not changing eax).
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site).  The receiver is
+  // stack element argc+1.
+  __ movq(Operand(rsp, rax, times_pointer_size, kPointerSize), rdi);
+  // Set expected number of arguments to zero (not changing rax).
   __ movq(rbx, Immediate(0));
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
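
A sketch of the receiver handling that Generate_FunctionCall above implements
for Function.prototype.call:

  function whoIsThis() { return this; }
  whoIsThis.call(null);       // Global object: null/undefined get the global receiver.
  typeof whoIsThis.call(42);  // "object": a primitive receiver is boxed via TO_OBJECT.
  try {
    whoIsThis.call.call({});  // The callee (the receiver of .call) is not a function;
  } catch (e) {               // CALL_NON_FUNCTION receives it and throws a TypeError.
  }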
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index ce1c938..6c063f3 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -246,14 +246,10 @@
 // -----------------------------------------------------------------------------
 // CodeGenerator implementation.
 
-CodeGenerator::CodeGenerator(MacroAssembler* masm,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
+CodeGenerator::CodeGenerator(MacroAssembler* masm)
+    : deferred_(8),
       masm_(masm),
-      scope_(NULL),
+      info_(NULL),
       frame_(NULL),
       allocator_(NULL),
       state_(NULL),
@@ -263,6 +259,9 @@
 }
 
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
+
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.  The inevitable call
   // will sync frame elements to memory anyway, so we do it eagerly to
@@ -278,16 +277,12 @@
 }
 
 
-void CodeGenerator::Generate(FunctionLiteral* function,
-                             Mode mode,
-                             CompilationInfo* info) {
+void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
   // Record the position for debugging purposes.
-  CodeForFunctionPosition(function);
-  ZoneList<Statement*>* body = function->body();
+  CodeForFunctionPosition(info->function());
 
   // Initialize state.
-  ASSERT(scope_ == NULL);
-  scope_ = function->scope();
+  info_ = info;
   ASSERT(allocator_ == NULL);
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
@@ -302,7 +297,7 @@
 
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
-      function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
     frame_->SpillAll();
     __ int3();
   }
@@ -328,7 +323,7 @@
       frame_->AllocateStackSlots();
 
       // Allocate the local context if needed.
-      int heap_slots = scope_->num_heap_slots();
+      int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         Comment cmnt(masm_, "[ allocate local context");
         // Allocate local context.
@@ -358,7 +353,6 @@
       // 3) don't copy parameter operand code from SlotOperand!
       {
         Comment cmnt2(masm_, "[ copy context parameters into .context");
-
         // Note that iteration order is relevant here! If we have the same
         // parameter twice (e.g., function (x, y, x)), and that parameter
         // needs to be copied into the context, it must be the last argument
@@ -367,15 +361,15 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
-        for (int i = 0; i < scope_->num_parameters(); i++) {
-          Variable* par = scope_->parameter(i);
+        for (int i = 0; i < scope()->num_parameters(); i++) {
+          Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
           if (slot != NULL && slot->type() == Slot::CONTEXT) {
             // The use of SlotOperand below is safe in unspilled code
             // because the slot is guaranteed to be a context slot.
             //
             // There are no parameters in the global scope.
-            ASSERT(!scope_->is_global_scope());
+            ASSERT(!scope()->is_global_scope());
             frame_->PushParameterAt(i);
             Result value = frame_->Pop();
             value.ToRegister();
@@ -403,9 +397,9 @@
       }
 
       // Initialize ThisFunction reference if present.
-      if (scope_->is_function_scope() && scope_->function() != NULL) {
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
         frame_->Push(Factory::the_hole_value());
-        StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
       // When used as the secondary compiler for splitting, rbp, rsi,
@@ -423,12 +417,12 @@
     // Generate code to 'execute' declarations and initialize functions
     // (source elements). In case of an illegal redeclaration we need to
     // handle that instead of processing the declarations.
-    if (scope_->HasIllegalRedeclaration()) {
+    if (scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ illegal redeclarations");
-      scope_->VisitIllegalRedeclaration(this);
+      scope()->VisitIllegalRedeclaration(this);
     } else {
       Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope_->declarations());
+      ProcessDeclarations(scope()->declarations());
       // Bail out if a stack-overflow exception occurred when processing
       // declarations.
       if (HasStackOverflow()) return;
@@ -443,7 +437,7 @@
     // Compile the body of the function in a vanilla state. Don't
     // bother compiling all the code if the scope has an illegal
     // redeclaration.
-    if (!scope_->HasIllegalRedeclaration()) {
+    if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
       bool is_builtin = Bootstrapper::IsActive();
@@ -454,14 +448,14 @@
         // Ignore the return value.
       }
 #endif
-      VisitStatements(body);
+      VisitStatements(info->function()->body());
 
       // Handle the return from the function.
       if (has_valid_frame()) {
         // If there is a valid frame, control flow can fall off the end of
         // the body.  In that case there is an implicit return statement.
         ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(function);
+        CodeForReturnPosition(info->function());
         frame_->PrepareForReturn();
         Result undefined(Factory::undefined_value());
         if (function_return_.is_bound()) {
@@ -504,7 +498,6 @@
   // There is no need to delete the register allocator, it is a
   // stack-allocated local.
   allocator_ = NULL;
-  scope_ = NULL;
 }
 
 void CodeGenerator::GenerateReturnSequence(Result* return_value) {
@@ -527,7 +520,7 @@
   // Leave the frame and return popping the arguments and the
   // receiver.
   frame_->Exit();
-  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Add padding that will be overwritten by a debugger breakpoint.
   // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
@@ -695,7 +688,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -773,8 +766,8 @@
       __ j(equal, &adapted);
 
       // No arguments adaptor frame. Copy fixed number of arguments.
-      __ movq(rax, Immediate(scope_->num_parameters()));
-      for (int i = 0; i < scope_->num_parameters(); i++) {
+      __ movq(rax, Immediate(scope()->num_parameters()));
+      for (int i = 0; i < scope()->num_parameters(); i++) {
         __ push(frame_->ParameterAt(i));
       }
       __ jmp(&invoke);
@@ -1228,7 +1221,7 @@
     // Compare and branch to the body if true or the next test if
     // false.  Prefer the next test as a fall through.
     ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(equal, true, &dest);
+    Comparison(node, equal, true, &dest);
 
     // If the comparison fell through to the true target, jump to the
     // actual body.
@@ -2225,8 +2218,7 @@
   // Spill everything, even constants, to the frame.
   frame_->SpillAll();
 
-  DebuggerStatementStub ces;
-  frame_->CallStub(&ces, 0);
+  frame_->DebugBreak();
   // Ignore the return value.
 #endif
 }
@@ -2263,7 +2255,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script_, this);
+      Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
   InstantiateBoilerplate(boilerplate);
@@ -2503,17 +2495,19 @@
   // Load the literals array of the function.
   __ movq(literals.reg(),
           FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
+
   frame_->Push(&literals);
-  // Literal index.
   frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant elements.
   frame_->Push(node->constant_elements());
+  int length = node->values()->length();
   Result clone;
   if (node->depth() > 1) {
     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    clone = frame_->CallStub(&stub, 3);
   }
   frame_->Push(&clone);
 
@@ -2763,9 +2757,6 @@
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
 
-    // Push the name of the function and the receiver onto the stack.
-    frame_->Push(var->name());
-
     // Pass the global object as the receiver and let the IC stub
     // patch the stack to use the global proxy as 'this' in the
     // invoked function.
@@ -2777,6 +2768,9 @@
       Load(args->at(i));
     }
 
+    // Push the name of the function on the frame.
+    frame_->Push(var->name());
+
     // Call the IC initialization code.
     CodeForSourcePosition(node->position());
     Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
@@ -2784,7 +2778,7 @@
                                        loop_nesting());
     frame_->RestoreContextRegister();
     // Replace the function on the stack with the result.
-    frame_->SetElementAt(0, &result);
+    frame_->Push(&result);
 
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
@@ -2837,8 +2831,7 @@
                       node->position());
 
       } else {
-        // Push the name of the function and the receiver onto the stack.
-        frame_->Push(name);
+        // Push the receiver onto the frame.
         Load(property->obj());
 
         // Load the arguments.
@@ -2847,14 +2840,16 @@
           Load(args->at(i));
         }
 
+        // Push the name of the function onto the frame.
+        frame_->Push(name);
+
         // Call the IC initialization code.
         CodeForSourcePosition(node->position());
         Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
                                            arg_count,
                                            loop_nesting());
         frame_->RestoreContextRegister();
-        // Replace the function on the stack with the result.
-        frame_->SetElementAt(0, &result);
+        frame_->Push(&result);
       }
 
     } else {
@@ -2945,8 +2940,6 @@
   Runtime::Function* function = node->function();
 
   if (function == NULL) {
-    // Prepare stack for calling JS runtime function.
-    frame_->Push(node->name());
     // Push the builtins object found in the current global object.
     Result temp = allocator()->Allocate();
     ASSERT(temp.is_valid());
@@ -2964,11 +2957,12 @@
 
   if (function == NULL) {
     // Call the JS runtime function.
+    frame_->Push(node->name());
     Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
                                        arg_count,
                                        loop_nesting_);
     frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &answer);
+    frame_->Push(&answer);
   } else {
     // Call the C runtime function.
     Result answer = frame_->CallRuntime(function, arg_count);
@@ -3077,7 +3071,6 @@
 
       case Token::SUB: {
         GenericUnaryOpStub stub(Token::SUB, overwrite);
-        // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
         frame_->Push(&answer);
@@ -3593,7 +3586,7 @@
   }
   Load(left);
   Load(right);
-  Comparison(cc, strict, destination());
+  Comparison(node, cc, strict, destination());
 }
 
 
@@ -3610,7 +3603,7 @@
   Load(args->at(0));
   Result key = frame_->Pop();
   // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   Result result = frame_->CallStub(&stub, &key, &count);
@@ -3719,7 +3712,7 @@
   ASSERT(args->length() == 0);
   // ArgumentsAccessStub takes the parameter count as an input argument
   // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
+  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
   Result result = frame_->CallStub(&stub, &count);
@@ -3978,6 +3971,17 @@
 }
 
 
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and jump to the runtime.
+  Load(args->at(0));
+
+  Result answer = frame_->CallRuntime(Runtime::kNumberToString, 1);
+  frame_->Push(&answer);
+}
+
+
 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
@@ -4267,34 +4271,52 @@
   // The value to convert should be popped from the frame.
   Result value = frame_->Pop();
   value.ToRegister();
-  // Fast case checks.
 
-  // 'false' => false.
-  __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
-  dest->false_target()->Branch(equal);
+  if (value.is_number()) {
+    Comment cmnt(masm_, "ONLY_NUMBER");
+    // Fast case if NumberInfo indicates only numbers.
+    if (FLAG_debug_code) {
+      __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+    }
+    // Smi => false iff zero.
+    __ SmiCompare(value.reg(), Smi::FromInt(0));
+    dest->false_target()->Branch(equal);
+    Condition is_smi = masm_->CheckSmi(value.reg());
+    dest->true_target()->Branch(is_smi);
+    __ fldz();
+    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+    __ FCmp();
+    value.Unuse();
+    dest->Split(not_zero);
+  } else {
+    // Fast case checks.
+    // 'false' => false.
+    __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
+    dest->false_target()->Branch(equal);
 
-  // 'true' => true.
-  __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
-  dest->true_target()->Branch(equal);
+    // 'true' => true.
+    __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
+    dest->true_target()->Branch(equal);
 
-  // 'undefined' => false.
-  __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
-  dest->false_target()->Branch(equal);
+    // 'undefined' => false.
+    __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
+    dest->false_target()->Branch(equal);
 
-  // Smi => false iff zero.
-  __ SmiCompare(value.reg(), Smi::FromInt(0));
-  dest->false_target()->Branch(equal);
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  dest->true_target()->Branch(is_smi);
+    // Smi => false iff zero.
+    __ SmiCompare(value.reg(), Smi::FromInt(0));
+    dest->false_target()->Branch(equal);
+    Condition is_smi = masm_->CheckSmi(value.reg());
+    dest->true_target()->Branch(is_smi);
 
-  // Call the stub for all other cases.
-  frame_->Push(&value);  // Undo the Pop() from above.
-  ToBooleanStub stub;
-  Result temp = frame_->CallStub(&stub, 1);
-  // Convert the result to a condition code.
-  __ testq(temp.reg(), temp.reg());
-  temp.Unuse();
-  dest->Split(not_equal);
+    // Call the stub for all other cases.
+    frame_->Push(&value);  // Undo the Pop() from above.
+    ToBooleanStub stub;
+    Result temp = frame_->CallStub(&stub, 1);
+    // Convert the result to a condition code.
+    __ testq(temp.reg(), temp.reg());
+    temp.Unuse();
+    dest->Split(not_equal);
+  }
 }
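
The ONLY_NUMBER fast path above matches the standard ToBoolean results for
numbers:

  !!0;    // false: smi zero is caught by the SmiCompare.
  !!-0;   // false: heap number whose FCmp against +0.0 reports equal.
  !!NaN;  // false: FCmp reports unordered (ZF set), so not_zero fails.
  !!0.5;  // true.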
 
 
@@ -4789,13 +4811,13 @@
 }
 
 
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
-  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-  ASSERT(scope_->arguments_shadow() != NULL);
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope()->arguments_shadow() != NULL);
   // We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interferes with the
   // uninitialized const tracking in the context objects.
-  return (scope_->num_heap_slots() > 0)
+  return (scope()->num_heap_slots() > 0)
       ? EAGER_ARGUMENTS_ALLOCATION
       : LAZY_ARGUMENTS_ALLOCATION;
 }
@@ -4815,14 +4837,14 @@
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     frame_->PushFunction();
     frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope_->num_parameters()));
+    frame_->Push(Smi::FromInt(scope()->num_parameters()));
     Result result = frame_->CallStub(&stub, 3);
     frame_->Push(&result);
   }
 
 
-  Variable* arguments = scope_->arguments()->var();
-  Variable* shadow = scope_->arguments_shadow()->var();
+  Variable* arguments = scope()->arguments()->var();
+  Variable* shadow = scope()->arguments_shadow()->var();
   ASSERT(arguments != NULL && arguments->slot() != NULL);
   ASSERT(shadow != NULL && shadow->slot() != NULL);
   JumpTarget done;
@@ -4831,7 +4853,7 @@
     // We have to skip storing into the arguments slot if it has
     // already been written to. This can happen if a function
     // has a local variable named 'arguments'.
-    LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
     Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has been
@@ -4875,7 +4897,8 @@
 }
 
 
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(AstNode* node,
+                               Condition cc,
                                bool strict,
                                ControlDestination* dest) {
   // Strict only makes sense for equality comparisons.
@@ -4922,7 +4945,8 @@
         default:
           UNREACHABLE();
       }
-    } else {  // Only one side is a constant Smi.
+    } else {
+      // Only one side is a constant Smi.
       // If left side is a constant Smi, reverse the operands.
       // Since one side is a constant Smi, conversion order does not matter.
       if (left_side_constant_smi) {
@@ -4936,6 +4960,8 @@
       // Implement comparison against a constant Smi, inlining the case
       // where both sides are Smis.
       left_side.ToRegister();
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
 
       // Here we split control flow to the stub call and inlined cases
       // before finally splitting it to the control destination.  We use
@@ -4943,12 +4969,48 @@
       // the first split.  We manually handle the off-frame references
       // by reconstituting them on the non-fall-through path.
       JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
 
       Condition left_is_smi = masm_->CheckSmi(left_side.reg());
       is_smi.Branch(left_is_smi);
 
+      bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
+          && node->AsCompareOperation()->is_for_loop_condition();
+      if (!is_for_loop_compare && right_val->IsSmi()) {
+        // Right side is a constant smi and left side has been checked
+        // not to be a smi.
+        JumpTarget not_number;
+        __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+               Factory::heap_number_map());
+        not_number.Branch(not_equal, &left_side);
+        __ movsd(xmm1,
+                 FieldOperand(left_reg, HeapNumber::kValueOffset));
+        int value = Smi::cast(*right_val)->value();
+        if (value == 0) {
+          __ xorpd(xmm0, xmm0);
+        } else {
+          Result temp = allocator()->Allocate();
+          __ movl(temp.reg(), Immediate(value));
+          __ cvtlsi2sd(xmm0, temp.reg());
+          temp.Unuse();
+        }
+        __ ucomisd(xmm1, xmm0);
+        // Jump to builtin for NaN.
+        not_number.Branch(parity_even, &left_side);
+        left_side.Unuse();
+        Condition double_cc = cc;
+        switch (cc) {
+          case less:          double_cc = below;       break;
+          case equal:         double_cc = equal;       break;
+          case less_equal:    double_cc = below_equal; break;
+          case greater:       double_cc = above;       break;
+          case greater_equal: double_cc = above_equal; break;
+          default: UNREACHABLE();
+        }
+        dest->true_target()->Branch(double_cc);
+        dest->false_target()->Jump();
+        not_number.Bind(&left_side);
+      }
+
       // Setup and call the compare stub.
       CompareStub stub(cc, strict);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
@@ -5121,26 +5183,34 @@
     // Neither operand is known to be a string.
   }
 
-  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi_constant =
+      right.is_constant() && !right.handle()->IsSmi();
 
-  if (left_is_smi && right_is_smi) {
+  if (left_is_smi_constant && right_is_smi_constant) {
     // Compute the constant result at compile time, and leave it on the frame.
     int left_int = Smi::cast(*left.handle())->value();
     int right_int = Smi::cast(*right.handle())->value();
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }
 
+  // Get number type of left and right sub-expressions.
+  NumberInfo::Type operands_type =
+      NumberInfo::Combine(left.number_info(), right.number_info());
+
   Result answer;
-  if (left_is_non_smi || right_is_non_smi) {
-    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+  if (left_is_non_smi_constant || right_is_non_smi_constant) {
+    GenericBinaryOpStub stub(op,
+                             overwrite_mode,
+                             NO_SMI_CODE_IN_STUB,
+                             operands_type);
     answer = stub.GenerateCall(masm_, frame_, &left, &right);
-  } else if (right_is_smi) {
+  } else if (right_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
                                         type, false, overwrite_mode);
-  } else if (left_is_smi) {
+  } else if (left_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
                                         type, true, overwrite_mode);
   } else {
@@ -5152,10 +5222,62 @@
     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
     } else {
-      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+      GenericBinaryOpStub stub(op,
+                               overwrite_mode,
+                               NO_GENERIC_BINARY_FLAGS,
+                               operands_type);
       answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
   }
+
+  // Set NumberInfo of result according to the operation performed.
+  // We rely on the fact that smis have a 32-bit payload on x64.
+  ASSERT(kSmiValueSize == 32);
+  NumberInfo::Type result_type = NumberInfo::kUnknown;
+  switch (op) {
+    case Token::COMMA:
+      result_type = right.number_info();
+      break;
+    case Token::OR:
+    case Token::AND:
+      // Result type can be either of the two input types.
+      result_type = operands_type;
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      // Result is always a smi.
+      result_type = NumberInfo::kSmi;
+      break;
+    case Token::SAR:
+    case Token::SHL:
+      // Result is always a smi.
+      result_type = NumberInfo::kSmi;
+      break;
+    case Token::SHR:
+      // Result of x >>> y is always a smi if y >= 1, otherwise a number.
+      result_type = (right.is_constant() && right.handle()->IsSmi()
+                     && Smi::cast(*right.handle())->value() >= 1)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::ADD:
+      // Result could be a string or a number. Check types of inputs.
+      result_type = NumberInfo::IsNumber(operands_type)
+          ? NumberInfo::kNumber
+          : NumberInfo::kUnknown;
+      break;
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      // Result is always a number.
+      result_type = NumberInfo::kNumber;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  answer.set_number_info(result_type);
   frame_->Push(&answer);
 }
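
Examples of the result types the switch above infers (on x64, where the smi
payload is 32 bits):

  a | 0;    // Smi: bitwise operations produce int32 results.
  a << 3;   // Smi for the same reason.
  a >>> 1;  // Smi: shifting right by at least one leaves at most 31 value bits.
  a >>> 0;  // Only known to be a number: the result can be up to 2^32 - 1.
  a + b;    // Number if both operands are numbers, otherwise unknown (may be a string).
  a * b;    // Always a number.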
 
@@ -6228,6 +6350,63 @@
 }
 
 
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + kPointerSize]: constant elements.
+  // [rsp + (2 * kPointerSize)]: literal index.
+  // [rsp + (3 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
+  // Load boilerplate object into rcx and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rcx,
+          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &slow_case);
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rax, i), rbx);
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ lea(rdx, Operand(rax, JSArray::kSize));
+    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+    // Copy the elements array.
+    for (int i = 0; i < elements_size; i += kPointerSize) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rdx, i), rbx);
+    }
+  }
+
+  // Return and remove the on-stack parameters.
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slow_case);
+  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+  __ TailCallRuntime(runtime, 3, 1);
+}
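
Note: in effect the stub is an allocation plus a fully unrolled word copy of the boilerplate: the JSArray header first, then (for length_ > 0) the elements array, with the clone's elements pointer retargeted at the fresh copy. A sketch of the size arithmetic and the copy, using illustrative header sizes in place of the real JSArray::kSize and FixedArray::kHeaderSize:

    #include <cstdint>

    const int kPointerSize = 8;                          // x64.
    const int kJSArraySize = 4 * kPointerSize;           // Illustrative only.
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // Illustrative only.

    int CloneSize(int length) {
      int elements_size =
          (length > 0) ? kFixedArrayHeaderSize + length * kPointerSize : 0;
      return kJSArraySize + elements_size;  // One allocation, one limit check.
    }

    // The unrolled movq pairs above amount to this word-wise copy.
    void CopyWords(uintptr_t* dst, const uintptr_t* src, int byte_size) {
      for (int i = 0; i < byte_size / kPointerSize; i++) dst[i] = src[i];
    }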
+
+
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   Label false_result, true_result, not_string;
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -7241,30 +7420,107 @@
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // rsp[0] : return address
+  // rsp[8] : number of parameters
+  // rsp[16] : receiver displacement
+  // rsp[24] : function
+
   // The displacement is used for skipping the return address and the
   // frame pointer on the stack. It is the offset of the last
   // parameter (if any) relative to the frame pointer.
   static const int kDisplacement = 2 * kPointerSize;
 
   // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
+  Label adaptor_frame, try_allocate, runtime;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
                 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &runtime);
-  // Value in rcx is Smi encoded.
+  __ j(equal, &adaptor_frame);
+
+  // Get the length from the frame.
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
   __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ movq(Operand(rsp, 1 * kPointerSize), rcx);
-  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  // Do not clobber the length (in rcx) when computing the index, since it
+  // is used to compute the allocation size later.
+  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
   __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
+  // Try the new space allocation. Start out with computing the size of
+  // the arguments object and the elements array.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ testq(rcx, rcx);
+  __ j(zero, &add_arguments_object);
+  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ bind(&add_arguments_object);
+  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+
+  // Do the allocation of both objects in one go.
+  __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+  // Get the arguments boilerplate from the current (global) context.
+  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+  __ movq(rdi, Operand(rdi, offset));
+
+  // Copy the JS object part.
+  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+    __ movq(kScratchRegister, FieldOperand(rdi, i));
+    __ movq(FieldOperand(rax, i), kScratchRegister);
+  }
+
+  // Set up the callee in-object property.
+  ASSERT(Heap::arguments_callee_index == 0);
+  __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+  __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  ASSERT(Heap::arguments_length_index == 1);
+  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+  // If there are no actual arguments, we're done.
+  Label done;
+  __ testq(rcx, rcx);
+  __ j(zero, &done);
+
+  // Get the parameters pointer from the stack and untag the length.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  __ SmiToInteger32(rcx, rcx);
+
+  // Set up the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+  // Copy the fixed array slots.
+  Label loop;
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
+  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+  __ addq(rdi, Immediate(kPointerSize));
+  __ subq(rdx, Immediate(kPointerSize));
+  __ decq(rcx);
+  __ j(not_zero, &loop);
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
-  __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
+  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
 }
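
Note: the size fed to AllocateInNewSpace follows directly from the argument count; a sketch of the computation done in rcx above, again with an illustrative constant standing in for Heap::kArgumentsObjectSize:

    const int kPointerSize = 8;                          // x64.
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // Illustrative only.
    const int kArgumentsObjectSize = 4 * kPointerSize;   // Illustrative only.

    // Zero-argument calls skip the elements array entirely, which is what
    // the testq / j(zero, &add_arguments_object) pair short-circuits.
    int ArgumentsAllocationSize(int argc) {
      int size = kArgumentsObjectSize;
      if (argc > 0) size += kFixedArrayHeaderSize + argc * kPointerSize;
      return size;
    }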
 
 
@@ -7599,6 +7855,9 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
   __ Set(rax, argc_);
   __ Set(rbx, 0);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
@@ -7987,13 +8246,14 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               use_sse3_ ? "SSE3" : "SSE2");
+               use_sse3_ ? "SSE3" : "SSE2",
+               NumberInfo::ToString(operands_type_));
   return name_;
 }
 
@@ -8317,7 +8577,15 @@
     case Token::DIV: {
       // rax: y
       // rdx: x
-      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+      if (NumberInfo::IsNumber(operands_type_)) {
+        if (FLAG_debug_code) {
+          // Assert at runtime that inputs are only numbers.
+          __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
+          __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
+        }
+      } else {
+        FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
+      }
       // Fast-case: Both operands are numbers.
       // xmm4 and xmm5 are volatile XMM registers.
       FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index a758e73..3454312 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -305,19 +305,15 @@
 
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);
 
   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();
 
   bool has_valid_frame() const { return frame_ != NULL; }
 
@@ -352,16 +348,15 @@
 
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
-  Scope* scope() const { return scope_; }
+  inline bool is_eval();
+  Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
 
-  bool is_eval() { return is_eval_; }
-
   // State
   ControlDestination* destination() const { return state_->destination(); }
 
@@ -390,7 +385,7 @@
   void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);
 
   // Generate the return sequence code.  Should be called no more than
   // once per compiled function, immediately after binding the return
@@ -398,7 +393,7 @@
   void GenerateReturnSequence(Result* return_value);
 
   // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode() const;
+  ArgumentsAllocationMode ArgumentsMode();
 
   // Store the arguments object and allocate it if necessary.
   Result StoreArgumentsObject(bool initial);
@@ -489,7 +484,8 @@
                                   Result* right,
                                   OverwriteMode overwrite_mode);
 
-  void Comparison(Condition cc,
+  void Comparison(AstNode* node,
+                  Condition cc,
                   bool strict,
                   ControlDestination* destination);
 
@@ -581,6 +577,9 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  // Fast support for number to string.
+  void GenerateNumberToString(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -604,15 +603,14 @@
   bool HasValidEntryRegisters();
 #endif
 
-  bool is_eval_;  // Tells whether code is generated for eval.
-  Handle<Script> script_;
   ZoneList<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
 
+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   CodeGenState* state_;
@@ -660,13 +658,15 @@
  public:
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
-                      GenericBinaryFlags flags)
+                      GenericBinaryFlags flags,
+                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
       : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL) {
+        name_(NULL),
+        operands_type_(operands_type) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
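
Note: because the new parameter is defaulted, every existing call site keeps compiling unchanged while type-feedback-aware sites can request a specialized stub. For example (hypothetical call sites mirroring the codegen above):

    // Legacy call site: operand type defaults to NumberInfo::kUnknown.
    GenericBinaryOpStub generic(Token::ADD, NO_OVERWRITE,
                                NO_GENERIC_BINARY_FLAGS);

    // Typed call site: gets its own cached code object, since
    // NumberInfoBits feeds into MinorKey().
    GenericBinaryOpStub typed(Token::ADD, NO_OVERWRITE,
                              NO_GENERIC_BINARY_FLAGS,
                              NumberInfo::kNumber);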
@@ -691,28 +691,32 @@
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
   char* name_;
+  NumberInfo::Type operands_type_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d)\n",
+    PrintF("GenericBinaryOpStub %d (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
+           MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_));
+           static_cast<int>(args_reversed_),
+           NumberInfo::ToString(operands_type_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 10> {};
-  class SSE3Bits: public BitField<bool, 12, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
-  class ArgsReversedBits: public BitField<bool, 14, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+  class ArgsReversedBits: public BitField<bool, 11, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -722,7 +726,8 @@
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_);
+           | ArgsReversedBits::encode(args_reversed_)
+           | NumberInfoBits::encode(operands_type_);
   }
 
   void Generate(MacroAssembler* masm);
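
Note: the repacked key reads, from low bits to high: 2 mode bits, 7 op bits, one bit each for SSE3, args-in-registers, args-reversed and the generic flag, then 3 NumberInfo bits. A minimal reimplementation of the BitField helper (modelled on V8's utils.h version) showing how disjoint fields compose into the key:

    #include <cstdint>
    #include <cassert>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static bool is_valid(T value) {
        return (static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0;
      }
      static uint32_t encode(T value) {
        assert(is_valid(value));
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    // The same positions as the stub's classes above.
    typedef BitField<int, 0, 2>  ModeBits;
    typedef BitField<int, 2, 7>  OpBits;
    typedef BitField<int, 13, 3> NumberInfoBits;

    uint32_t MinorKey(int mode, int op, int number_info) {
      // Fields occupy disjoint bit ranges, so OR-ing them never collides.
      return ModeBits::encode(mode) |
             OpBits::encode(op) |
             NumberInfoBits::encode(number_info);
    }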
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index ce3aae8..547daee 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -993,56 +993,22 @@
   byte* current = data + 2;
   // At return, "current" points to the start of the next instruction.
   const char* mnemonic = TwoByteMnemonic(opcode);
-  if (opcode == 0x1F) {
-    // NOP
+  if (operand_size_ == 0x66) {
+    // 0x66 0x0F prefix.
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
-    current++;
-    if (regop == 4) {  // SIB byte present.
-      current++;
-    }
-    if (mod == 1) {  // Byte displacement.
-      current += 1;
-    } else if (mod == 2) {  // 32-bit displacement.
-      current += 4;
-    }  // else no immediate displacement.
-    AppendToBuffer("nop");
-
-  } else  if (opcode == 0xA2 || opcode == 0x31) {
-    // RDTSC or CPUID
-    AppendToBuffer("%s", mnemonic);
-
-  } else if ((opcode & 0xF0) == 0x40) {
-    // CMOVcc: conditional move.
-    int condition = opcode & 0x0F;
-    const InstructionDesc& idesc = cmov_instructions[condition];
-    byte_size_operand_ = idesc.byte_size_operation;
-    current += PrintOperands(idesc.mnem, idesc.op_order_, current);
-
-  } else if ((opcode & 0xF0) == 0x80) {
-    // Jcc: Conditional jump (branch).
-    current = data + JumpConditional(data);
-
-  } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
-             opcode == 0xB7 || opcode == 0xAF) {
-    // Size-extending moves, IMUL.
-    current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
-
-  } else if ((opcode & 0xF0) == 0x90) {
-    // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
-    current = data + SetCC(data);
-
-  } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
-    // SHLD, SHRD (double-precision shift), BTS (bit set).
-    AppendToBuffer("%s ", mnemonic);
-    int mod, regop, rm;
-    get_modrm(*current, &mod, &regop, &rm);
-    current += PrintRightOperand(current);
-    if (opcode == 0xAB) {
-      AppendToBuffer(",%s", NameOfCPURegister(regop));
+    const char* mnemonic = "?";
+    if (opcode == 0x57) {
+      mnemonic = "xorpd";
+    } else if (opcode == 0x2E) {
+      mnemonic = "comisd";
+    } else if (opcode == 0x2F) {
+      mnemonic = "ucomisd";
     } else {
-      AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+      UnimplementedInstruction();
     }
+    AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
   } else if (group_1_prefix_ == 0xF2) {
     // Beginning of instructions with prefix 0xF2.
 
@@ -1080,6 +1046,55 @@
     // Assert that mod is not 3, so source is memory, not an XMM register.
     ASSERT_NE(0xC0, *current & 0xC0);
     current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+  } else if (opcode == 0x1F) {
+    // NOP
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current++;
+    if (regop == 4) {  // SIB byte present.
+      current++;
+    }
+    if (mod == 1) {  // Byte displacement.
+      current += 1;
+    } else if (mod == 2) {  // 32-bit displacement.
+      current += 4;
+    }  // else no immediate displacement.
+    AppendToBuffer("nop");
+  } else if (opcode == 0xA2 || opcode == 0x31) {
+    // RDTSC or CPUID
+    AppendToBuffer("%s", mnemonic);
+
+  } else if ((opcode & 0xF0) == 0x40) {
+    // CMOVcc: conditional move.
+    int condition = opcode & 0x0F;
+    const InstructionDesc& idesc = cmov_instructions[condition];
+    byte_size_operand_ = idesc.byte_size_operation;
+    current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+
+  } else if ((opcode & 0xF0) == 0x80) {
+    // Jcc: Conditional jump (branch).
+    current = data + JumpConditional(data);
+
+  } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
+             opcode == 0xB7 || opcode == 0xAF) {
+    // Size-extending moves, IMUL.
+    current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
+
+  } else if ((opcode & 0xF0) == 0x90) {
+    // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
+    current = data + SetCC(data);
+
+  } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
+    // SHLD, SHRD (double-precision shift), BTS (bit set).
+    AppendToBuffer("%s ", mnemonic);
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current += PrintRightOperand(current);
+    if (opcode == 0xAB) {
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+    } else {
+      AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+    }
   } else {
     UnimplementedInstruction();
   }
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 12b5653..1af7685 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -35,78 +35,152 @@
 
 #define __ ACCESS_MASM(masm())
 
-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return rax; }
+Register FastCodeGenerator::accumulator1() { return rdx; }
+Register FastCodeGenerator::scratch0() { return rcx; }
+Register FastCodeGenerator::scratch1() { return rdi; }
+Register FastCodeGenerator::receiver_reg() { return rbx; }
+Register FastCodeGenerator::context_reg() { return rsi; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
   // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
-  __ movq(reg, Operand(rbp, index * kPointerSize));
+  int index = 2 + scope()->num_parameters();
+  __ movq(receiver_reg(), Operand(rbp, index * kPointerSize));
 }
 
 
-void FastCodeGenerator::EmitReceiverMapCheck() {
-  Comment cmnt(masm(), ";; MapCheck(this)");
-  if (FLAG_print_ir) {
-    PrintF("MapCheck(this)\n");
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(!destination().is(no_reg));
+  ASSERT(cell->IsJSGlobalPropertyCell());
+
+  __ Move(destination(), cell);
+  __ movq(destination(),
+          FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ Cmp(destination(), Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  EmitLoadReceiver(rdx);
-  __ JumpIfSmi(rdx, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
-  Handle<Map> map(object->map());
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), map);
-  __ j(not_equal, bailout());
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are rsi (context) and possibly rdx (this).  Both are also
-  // saved in the stack and rsi is preserved by the call.
-  __ push(CodeGenerator::GlobalObject());
-  __ Move(rcx, name);
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(rdx);
-  } else {
-    __ nop();  // Not test rax, indicates IC has no inlined code at call site.
-  }
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }
 
 
 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);
 
-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;
 
-  // Negative offsets are inobject properties.
+  // We will emit the write barrier unless the stored value is statically
+  // known to be a smi.
+  bool needs_write_barrier = !is_smi(accumulator0());
+
+  // Perform the store.  Negative offsets are inobject properties.
   if (offset < 0) {
     offset += map->instance_size();
-    __ movq(rcx, rdx);  // Copy receiver for write barrier.
+    __ movq(FieldOperand(receiver_reg(), offset), accumulator0());
+    if (needs_write_barrier) {
+      // Preserve receiver from write barrier.
+      __ movq(scratch0(), receiver_reg());
+    }
   } else {
     offset += FixedArray::kHeaderSize;
-    __ movq(rcx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+    __ movq(scratch0(),
+            FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ movq(FieldOperand(scratch0(), offset), accumulator0());
   }
-  // Perform the store.
-  __ movq(FieldOperand(rcx, offset), rax);
-  // Preserve value from write barrier in case it's needed.
-  __ movq(rbx, rax);
-  __ RecordWrite(rcx, offset, rbx, rdi);
+
+  if (needs_write_barrier) {
+    if (destination().is(no_reg)) {
+      // After RecordWrite, accumulator0 is a smi only by accident, and it
+      // is already marked as not known to be one.
+      __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
+    } else {
+      // Copy the value to the other accumulator to preserve a copy from the
+      // write barrier. One of the accumulators is available as a scratch
+      // register.  Neither is a smi.
+      __ movq(accumulator1(), accumulator0());
+      clear_as_smi(accumulator1());
+      Register value_scratch = other_accumulator(destination());
+      __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
+    }
+  } else if (destination().is(accumulator1())) {
+    __ movq(accumulator1(), accumulator0());
+    // Is a smi because we do not need the write barrier.
+    set_as_smi(accumulator1());
+  }
 }
 
 
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  ASSERT(!destination().is(no_reg));
+  LookupResult lookup;
+  info()->receiver()->Lookup(*name, &lookup);
+
+  ASSERT(lookup.holder() == *info()->receiver());
+  ASSERT(lookup.type() == FIELD);
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+  int index = lookup.GetFieldIndex() - map->inobject_properties();
+  int offset = index * kPointerSize;
+
+  // Perform the load.  Negative offsets are inobject properties.
+  if (offset < 0) {
+    offset += map->instance_size();
+    __ movq(destination(), FieldOperand(receiver_reg(), offset));
+  } else {
+    offset += FixedArray::kHeaderSize;
+    __ movq(scratch0(),
+            FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ movq(destination(), FieldOperand(scratch0(), offset));
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+    // If both operands are known to be a smi then there is no need to check
+    // the operands or result.
+    if (destination().is(no_reg)) {
+      __ or_(accumulator1(), accumulator0());
+    } else {
+      // Leave the result in the destination register.  Bitwise or is
+      // commutative.
+      __ or_(destination(), other_accumulator(destination()));
+    }
+  } else if (destination().is(no_reg)) {
+    // Result is not needed but do not clobber the operands in case of
+    // bailout.
+    __ movq(scratch0(), accumulator1());
+    __ or_(scratch0(), accumulator0());
+    __ JumpIfNotSmi(scratch0(), bailout());
+  } else {
+    // Preserve the destination operand in a scratch register in case of
+    // bailout.
+    __ movq(scratch0(), destination());
+    __ or_(destination(), other_accumulator(destination()));
+    __ JumpIfNotSmi(destination(), bailout());
+  }
+
+  // If we didn't bail out, the result (in fact, both inputs too) is known
+  // to be a smi.
+  set_as_smi(accumulator0());
+  set_as_smi(accumulator1());
+}
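
Note: the both-smi fast path leans on the x64 smi encoding asserted earlier (kSmiValueSize == 32): the payload occupies the upper 32 bits and the entire lower half, including the tag bit, is zero, so OR-ing two smis yields a valid smi whose payload is the OR of the inputs and no check is needed. A sketch of that invariant:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;  // x64: payload in the high word, low word zero.

    int64_t ToSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    bool LooksLikeSmi(int64_t word) {
      return (word & ((int64_t{1} << kSmiShift) - 1)) == 0;
    }

    int main() {
      int64_t a = ToSmi(0x1234);
      int64_t b = ToSmi(-7);
      assert(LooksLikeSmi(a | b));            // Still tagged as a smi.
      assert((a | b) == ToSmi(0x1234 | -7));  // Payloads OR-ed correctly.
      return 0;
    }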
+
+
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -117,18 +191,42 @@
   // Note that we keep a live register reference to rsi (context) at this
   // point.
 
-  // Receiver (this) is allocated to rdx if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  // Receiver (this) is allocated to a fixed register.
+  if (info()->has_this_properties()) {
+    Comment cmnt(masm(), ";; MapCheck(this)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(this)\n");
+    }
+    ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+    Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+    Handle<Map> map(object->map());
+    EmitLoadReceiver();
+    __ CheckMap(receiver_reg(), map, bailout(), false);
+  }
 
-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object is the
+  // same as at lazy-compilation time.
+  if (info()->has_globals()) {
+    Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(GLOBAL)\n");
+    }
+    ASSERT(info()->has_global_object());
+    Handle<Map> map(info()->global_object()->map());
+    __ movq(scratch0(), CodeGenerator::GlobalObject());
+    __ CheckMap(scratch0(), map, bailout(), true);
+  }
+
+  VisitStatements(info()->function()->body());
 
   Comment return_cmnt(masm(), ";; Return(<undefined>)");
+  if (FLAG_print_ir) {
+    PrintF("Return(<undefined>)\n");
+  }
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-
-  Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  __ ret((scope()->num_parameters() + 1) * kPointerSize);
 
   __ bind(&bailout_);
 }
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index f5bbfaf..30db660 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -51,9 +51,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-x64.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
 
   if (mode == PRIMARY) {
     __ push(rbp);  // Caller's frame pointer.
@@ -62,7 +63,7 @@
     __ push(rdi);  // Callee's JS Function.
 
     { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = fun->scope()->num_stack_slots();
+      int locals_count = scope()->num_stack_slots();
       if (locals_count == 1) {
         __ PushRoot(Heap::kUndefinedValueRootIndex);
       } else if (locals_count > 1) {
@@ -76,7 +77,7 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in rdi.
       __ push(rdi);
@@ -87,9 +88,9 @@
       __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
 
       // Copy any necessary parameters into the context.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                      (num_parameters - 1 - i) * kPointerSize;
@@ -108,7 +109,7 @@
     }
 
     // Possibly allocate an arguments object.
-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Arguments object must be allocated after the context object, in
       // case the "arguments" or ".arguments" variables are in the context.
@@ -119,10 +120,11 @@
         __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
       }
       // The receiver is just before the parameters on the caller's stack.
-      __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
-                          fun->num_parameters() * kPointerSize));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ lea(rdx,
+             Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
       __ push(rdx);
-      __ Push(Smi::FromInt(fun->num_parameters()));
+      __ Push(Smi::FromInt(scope()->num_parameters()));
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
       // The stub will rewrite receiver and parameter count if the previous
@@ -133,13 +135,13 @@
       __ movq(rcx, rax);
       Move(arguments->slot(), rax, rbx, rdx);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, rcx, rbx, rdx);
     }
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -157,14 +159,14 @@
 
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
 
   { Comment cmnt(masm_, "[ return <undefined>;");
     // Emit a 'return undefined' in case control fell off the end of the body.
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-    EmitReturnSequence(function_->end_position());
+    EmitReturnSequence(function()->end_position());
   }
 }
 
@@ -190,7 +192,7 @@
     // patch with the code required by the debugger.
     __ movq(rsp, rbp);
     __ pop(rbp);
-    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Add padding that will be overwritten by a debugger breakpoint.  We
     // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
@@ -629,7 +631,7 @@
       return Operand(rbp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -688,7 +690,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ movq(rbx,
@@ -767,7 +769,7 @@
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
   __ Push(pairs);
-  __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+  __ Push(Smi::FromInt(is_eval() ? 1 : 0));
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -778,7 +780,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;
 
   ASSERT(boilerplate->IsBoilerplate());
@@ -1013,6 +1015,92 @@
 }
 
 
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // The left-hand side can only be a property, a global, or a (parameter
+  // or local) slot. Variables rewritten to '.arguments' are treated as
+  // KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForValue(prop->obj(), kAccumulator);
+        __ push(result_register());
+      } else {
+        VisitForValue(prop->obj(), kStack);
+      }
+      break;
+    case KEYED_PROPERTY:
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kStack);
+      break;
+  }
+
+  // If we have a compound assignment: get the value of the LHS expression
+  // and store it on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: apply the operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
@@ -1198,7 +1286,7 @@
 
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
-                                       Handle<Object> ignored,
+                                       Handle<Object> name,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
@@ -1206,6 +1294,7 @@
   for (int i = 0; i < arg_count; i++) {
     VisitForValue(args->at(i), kStack);
   }
+  __ Move(rcx, name);
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
@@ -1215,8 +1304,7 @@
   __ Call(ic, mode);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  // Discard the function left on TOS.
-  DropAndApply(1, context_, rax);
+  Apply(context_, rax);
 }
 
 
@@ -1248,7 +1336,6 @@
     UNREACHABLE();
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Call to a global variable.
-    __ Push(var->name());
     // Push global object as receiver for the call IC lookup.
     __ push(CodeGenerator::GlobalObject());
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
@@ -1262,7 +1349,6 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      __ Push(key->handle());
       VisitForValue(prop->obj(), kStack);
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
@@ -1353,7 +1439,6 @@
 
   if (expr->is_jsruntime()) {
     // Prepare for calling JS runtime function.
-    __ Push(expr->name());
     __ movq(rax, CodeGenerator::GlobalObject());
     __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
   }
@@ -1365,18 +1450,17 @@
   }
 
   if (expr->is_jsruntime()) {
-    // Call the JS runtime function.
-    Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
-                                                           NOT_IN_LOOP);
+    // Call the JS runtime function using a call IC.
+    __ Move(rcx, expr->name());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
     __ call(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    // Discard the function left on TOS.
-    DropAndApply(1, context_, rax);
   } else {
     __ CallRuntime(expr->function(), arg_count);
-    Apply(context_, rax);
   }
+  Apply(context_, rax);
 }
 
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 28bfd2e..0e93637 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -228,23 +228,37 @@
 }
 
 
-void KeyedLoadIC::Generate(MacroAssembler* masm,
-                           ExternalReference const& f) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0]  : return address
   //  -- rsp[8]  : name
   //  -- rsp[16] : receiver
   // -----------------------------------
 
-  __ movq(rax, Operand(rsp, kPointerSize));
-  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
   __ pop(rbx);
-  __ push(rcx);  // receiver
-  __ push(rax);  // name
+  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
+  __ push(Operand(rsp, 1 * kPointerSize));  // name
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
+  __ push(Operand(rsp, 1 * kPointerSize));  // name
+  __ push(rbx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
 }
 
 
@@ -317,7 +331,7 @@
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  GenerateRuntimeGetProperty(masm);
   __ bind(&check_string);
   // The key is not a smi.
   // Is it a string?
@@ -555,21 +569,54 @@
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
-  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  GenerateRuntimeGetProperty(masm);
 }
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0] : return address
-  //  -- rsp[8] : name
+  //  -- rsp[8] : key
   //  -- rsp[16] : receiver
   // -----------------------------------
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+  Label slow;
+
+  // Load key and receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rcx, &slow);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &slow);
+
+  // Get the map of the receiver.
+  __ movq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+
+  // Check that the receiver has an indexed interceptor and that access
+  // checks are not enabled for this object.
+  __ movb(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
+  __ andb(rdx, Immediate(kSlowCaseBitFieldMask));
+  __ cmpb(rdx, Immediate(1 << Map::kHasIndexedInterceptor));
+  __ j(not_zero, &slow);
+
+  // Everything is fine, call runtime.
+  __ pop(rdx);
+  __ push(rcx);  // receiver
+  __ push(rax);  // key
+  __ push(rdx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(
+        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm);
 }
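
Note: the andb/cmpb pair tests two map bits with a single compare: the fast path requires the indexed-interceptor bit set and the access-check bit clear. In C++ terms, assuming kSlowCaseBitFieldMask is exactly the union of those two bits (the bit positions below are hypothetical):

    #include <cstdint>

    const int kHasIndexedInterceptor = 4;  // Hypothetical bit positions.
    const int kIsAccessCheckNeeded = 5;
    const uint8_t kSlowCaseBitFieldMask =
        (1 << kIsAccessCheckNeeded) | (1 << kHasIndexedInterceptor);

    bool TakesInterceptorFastPath(uint8_t map_bit_field) {
      // Equal only when the interceptor bit is set AND the access-check
      // bit is clear; any other combination takes j(not_zero, &slow).
      return (map_bit_field & kSlowCaseBitFieldMask) ==
             (1 << kHasIndexedInterceptor);
    }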
 
 
-void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rsp[0]  : return address
@@ -584,28 +631,26 @@
   __ push(rcx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(f, 3, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
 }
 
 
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
-  //  -- rcx     : transition map
   //  -- rsp[0]  : return address
   //  -- rsp[8]  : key
   //  -- rsp[16] : receiver
   // -----------------------------------
 
-  __ pop(rbx);
+  __ pop(rcx);
   __ push(Operand(rsp, 1 * kPointerSize));  // receiver
-  __ push(rcx);  // transition map
+  __ push(Operand(rsp, 1 * kPointerSize));  // key
   __ push(rax);  // value
-  __ push(rbx);  // return address
+  __ push(rcx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
 }
 
 
@@ -659,7 +704,7 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
-  Generate(masm, ExternalReference(Runtime::kSetProperty));
+  GenerateRuntimeSetProperty(masm);
 
   // Check whether the elements is a pixel array.
   // rax: value
@@ -923,23 +968,29 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
-  Generate(masm, ExternalReference(Runtime::kSetProperty));
+  GenerateRuntimeSetProperty(masm);
 }
 
 
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-  // Get the name of the function to call from the stack.
-  // 2 ~ receiver, return address.
-  __ movq(rbx, Operand(rsp, (argc + 2) * kPointerSize));
 
   // Enter an internal frame.
   __ EnterInternalFrame();
 
   // Push the receiver and the name of the function.
   __ push(rdx);
-  __ push(rbx);
+  __ push(rcx);
 
   // Call the entry.
   CEntryStub stub(1);
@@ -977,20 +1028,18 @@
 
 void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  // rsp[0] return address
-  // rsp[8] argument argc
-  // rsp[16] argument argc - 1
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
   // ...
-  // rsp[argc * 8] argument 1
-  // rsp[(argc + 1) * 8] argument 0 = receiver
-  // rsp[(argc + 2) * 8] function name
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
   Label number, non_number, non_string, boolean, probe, miss;
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-  // Get the name of the function from the stack; 2 ~ return address, receiver
-  __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
 
   // Probe the stub cache.
   Code::Flags flags =
@@ -1043,6 +1092,16 @@
                                  int argc,
                                  bool is_global_object,
                                  Label* miss) {
+  // ----------- S t a t e -------------
+  // rcx                    : function name
+  // rdx                    : receiver
+  // rsp[0]                 : return address
+  // rsp[8]                 : argument argc
+  // rsp[16]                : argument argc - 1
+  // ...
+  // rsp[argc * 8]          : argument 1
+  // rsp[(argc + 1) * 8]    : argument 0 = receiver
+  // -----------------------------------
   // Search dictionary - put result in register rdx.
   GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
 
@@ -1073,20 +1132,18 @@
 
 void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  // rsp[0] return address
-  // rsp[8] argument argc
-  // rsp[16] argument argc - 1
+  // rcx                    : function name
+  // rsp[0]                 : return address
+  // rsp[8]                 : argument argc
+  // rsp[16]                : argument argc - 1
   // ...
-  // rsp[argc * 8] argument 1
-  // rsp[(argc + 1) * 8] argument 0 = receiver
-  // rsp[(argc + 2) * 8] function name
+  // rsp[argc * 8]          : argument 1
+  // rsp[(argc + 1) * 8]    : argument 0 = receiver
   // -----------------------------------
   Label miss, global_object, non_global_object;
 
   // Get the receiver of the function from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-  // Get the name of the function from the stack.
-  __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
@@ -1153,22 +1210,20 @@
 }
 
 
-void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rcx    : name
   //  -- rsp[0] : return address
   //  -- rsp[8] : receiver
   // -----------------------------------
 
-  __ movq(rax, Operand(rsp, kPointerSize));
-
   __ pop(rbx);
-  __ push(rax);  // receiver
+  __ push(Operand(rsp, 0));  // receiver
   __ push(rcx);  // name
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
 }
 
 
@@ -1224,17 +1279,6 @@
 }
 
 
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : receiver
-  // -----------------------------------
-
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rcx    : name
@@ -1278,7 +1322,7 @@
   // Cache miss: Restore receiver from stack and jump to runtime.
   __ bind(&miss);
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }
 
 
@@ -1292,13 +1336,12 @@
 
   __ movq(rax, Operand(rsp, kPointerSize));
 
-  StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
-
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
@@ -1326,6 +1369,7 @@
   return true;
 }
 
+
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
@@ -1344,24 +1388,6 @@
   __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
 }
 
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : Map (target of map transition)
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ pop(rbx);
-  __ push(rdx);  // receiver
-  __ push(rcx);  // transition map
-  __ push(rax);  // value
-  __ push(rbx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -1382,6 +1408,55 @@
 }
 
 
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, i.e. anything
+  // with elements of FixedArray type), but is currently restricted to
+  // JSArray.
+  // The value must be a number; only smis are accepted, as the most common
+  // case.
+
+  Label miss;
+
+  Register receiver = rdx;
+  Register value = rax;
+  Register scratch = rbx;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss);
+
+  // Check that elements are FixedArray.
+  __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss);
+
+  // Check that value is a smi.
+  __ JumpIfNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ pop(scratch);
+  __ push(receiver);
+  __ push(value);
+  __ push(scratch);  // return address
+
+  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
+}
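
Note: the guard chain above can be read as a single predicate, where any failing check falls through to GenerateMiss. A sketch with stand-in predicates for the smi, type, and elements checks (declarations only; these are not the real V8 API):

    struct Object;                            // Opaque heap value.
    bool IsSmi(const Object* o);              // Stand-ins for the real checks.
    bool IsJSArray(const Object* o);
    bool HasFixedArrayElements(const Object* o);

    // Mirrors JumpIfSmi / CmpObjectType / JumpIfNotSmi above.
    bool TakesArrayLengthFastPath(const Object* receiver,
                                  const Object* value) {
      return !IsSmi(receiver) &&
             IsJSArray(receiver) &&
             HasFixedArrayElements(receiver) &&
             IsSmi(value);
    }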
+
+
 #undef __
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 96b45e8..90a9c75 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -39,7 +39,6 @@
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
       code_object_(Heap::undefined_value()) {
@@ -387,6 +386,16 @@
 }
 
 
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  movq(rax, Immediate(num_arguments));
+  movq(rbx, ext);
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
                                      int num_arguments,
                                      int result_size) {
@@ -415,38 +424,30 @@
 }
 
 
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
-  const char* name = Builtins::GetName(id);
-  int argc = Builtins::GetArgumentsCount(id);
-
-  movq(target, code, RelocInfo::EMBEDDED_OBJECT);
-  if (!resolved) {
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
-    unresolved_.Add(entry);
-  }
-  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  // Rely on the assertion to check that the number of provided
+  // arguments matches the expected number of arguments. Fake a
+  // parameter count to avoid emitting code to do the check.
+  ParameterCount expected(0);
+  GetBuiltinEntry(rdx, id);
+  InvokeCode(rdx, expected, expected, flag);
 }
 
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
-  // Move the builtin function into the temporary function slot by
-  // reading it from the builtins object. NOTE: We should be able to
-  // reduce this to two instructions by putting the function table in
-  // the global object instead of the "builtins" object and by using a
-  // real register for the function.
-  movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  // Load the JavaScript builtin function from the builtins object.
+  movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  movq(rdi, FieldOperand(rdi, GlobalObject::kBuiltinsOffset));
   int builtins_offset =
       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  movq(rdi, FieldOperand(rdx, builtins_offset));
-
-  return Builtins::GetCode(id, resolved);
+  movq(rdi, FieldOperand(rdi, builtins_offset));
+  // Load the code entry point from the function into the target register.
+  movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
 }
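
Note: with bootstrap-time fixups gone, resolving a builtin's entry point is now a pure load chain with no patching. The movq sequence above corresponds to this pointer chase (the accessor names are stand-ins for the raw field loads):

    #include <cstdint>

    struct Code     { uint8_t* instruction_start(); };  // kHeaderSize skip.
    struct Shared   { Code* code(); };                  // kCodeOffset load.
    struct JSFun    { Shared* shared(); };  // kSharedFunctionInfoOffset load.
    struct Builtins { JSFun* javascript_builtin(int id); };
    struct Global   { Builtins* builtins(); };          // kBuiltinsOffset load.

    uint8_t* GetBuiltinEntry(Global* global, int id) {
      return global->builtins()
                   ->javascript_builtin(id)
                   ->shared()
                   ->code()
                   ->instruction_start();
    }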
 
 
@@ -1585,6 +1586,29 @@
 }
 
 
+void MacroAssembler::CheckMap(Register obj,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    JumpIfSmi(obj, fail);
+  }
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  j(not_equal, fail);
+}
+
+
+void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+  Label ok;
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, &ok);
+  Cmp(FieldOperand(object, HeapObject::kMapOffset),
+      Factory::heap_number_map());
+  Assert(equal, msg);
+  bind(&ok);
+}
+
+
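
A sketch of how a stub might combine the two new helpers; the function
name, register choices, and abort message are illustrative only:

    // Hypothetical stub fragment, not part of this patch.
    static void GenerateGuardedNumberOp(MacroAssembler* masm,
                                        Handle<Map> expected_map) {
      Label miss;
      // The receiver in rdx may be a smi, so request the smi check too.
      masm->CheckMap(rdx, expected_map, &miss, false);
      // In debug code, verify that the value in rax really is a number.
      masm->AbortIfNotNumber(rax, "GenerateGuardedNumberOp expects a number");
      // ... fast path ...
      masm->bind(&miss);
      // ... miss handling, e.g. jump to a generic stub ...
    }
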
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
@@ -1762,39 +1786,14 @@
   }
 }
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
-
-  // Rely on the assertion to check that the number of provided
-  // arguments match the expected number of arguments. Fake a
-  // parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  InvokeCode(Handle<Code>(code),
-             expected,
-             expected,
-             RelocInfo::CODE_TARGET,
-             flag);
-
-  const char* name = Builtins::GetName(id);
-  int argc = Builtins::GetArgumentsCount(id);
-  // The target address for the jump is stored as an immediate at offset
-  // kInvokeCodeAddressOffset.
-  if (!resolved) {
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry =
-        { pc_offset() - kCallTargetAddressOffset, flags, name };
-    unresolved_.Add(entry);
-  }
+void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
+  xor_(rax, rax);  // No arguments.
+  movq(rbx, ExternalReference(Runtime::kDebugBreak));
+  CEntryStub ces(1);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -1914,6 +1913,21 @@
 }
 
 
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+  // Get the function and set up the context.
+  Move(rdi, Handle<JSFunction>(function));
+  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(rbp);
   movq(rbp, rsp);
@@ -1953,13 +1967,9 @@
 
   // Reserve room for entry stack pointer and push the debug marker.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
-  push(Immediate(0));  // saved entry sp, patched before call
-  if (mode == ExitFrame::MODE_DEBUG) {
-    push(Immediate(0));
-  } else {
-    movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
-    push(kScratchRegister);
-  }
+  push(Immediate(0));  // Saved entry sp, patched before call.
+  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
 
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 2913274..6deeddc 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -98,6 +98,7 @@
   void CopyRegistersFromStackToMemory(Register base,
                                       Register scratch,
                                       RegList regs);
+  void DebugBreak();
 #endif
 
   // ---------------------------------------------------------------------------
@@ -148,6 +149,10 @@
                       const ParameterCount& actual,
                       InvokeFlag flag);
 
+  void InvokeFunction(JSFunction* function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
-  // Invoke specified builtin JavaScript function. Adds an entry to
-  // the unresolved list if the name does not resolve.
+  // Invoke the specified builtin JavaScript function. The function is
+  // loaded from the builtins object at runtime.
   void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
@@ -460,6 +465,14 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if the map of an object is equal to a specified map and
+  // branch to the given label if not. Skip the smi check if it is not
+  // required (i.e. the object is known to be a heap object).
+  void CheckMap(Register obj,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
   // contains the instance_type. The registers map and instance_type can be the
@@ -473,6 +486,9 @@
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
+  // Abort execution if argument is not a number. Used in debug code.
+  void AbortIfNotNumber(Register object, const char* msg);
+
   // ---------------------------------------------------------------------------
   // Exception handling
 
@@ -635,6 +651,10 @@
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext,
+                             int num_arguments);
+
   // Tail call of a runtime routine (jump).
   // Like JumpToRuntime, but also takes care of passing the number
   // of arguments.
@@ -671,13 +691,6 @@
 
   void Ret();
 
-  struct Unresolved {
-    int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
-    const char* name;
-  };
-  List<Unresolved>* unresolved() { return &unresolved_; }
-
   Handle<Object> CodeObject() { return code_object_; }
 
 
@@ -709,11 +722,10 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
  private:
-  List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
-                                // object on installation.
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -723,18 +735,6 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Prepares for a call or jump to a builtin by doing two things:
-  // 1. Emits code that fetches the builtin's function object from the context
-  //    at runtime, and puts it in the register rdi.
-  // 2. Fetches the builtin's code object, and returns it in a handle, at
-  //    compile time, so that later code can emit instructions to jump or call
-  //    the builtin directly.  If the code object has not yet been created, it
-  //    returns the builtin code object for IllegalFunction, and sets the
-  //    output parameter "resolved" to false.  Code that uses the return value
-  //    should then add the address and the builtin name to the list of fixups
-  //    called unresolved_, which is fixed up by the bootstrapper.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 693447b..9c8b4f7 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -133,11 +133,10 @@
 }
 
 
-template <typename Pushable>
 static void PushInterceptorArguments(MacroAssembler* masm,
                                      Register receiver,
                                      Register holder,
-                                     Pushable name,
+                                     Register name,
                                      JSObject* holder_obj) {
   __ push(receiver);
   __ push(holder);
@@ -201,8 +200,9 @@
 }
 
 
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if the store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
                                       JSObject* object,
                                       int index,
                                       Map* transition,
@@ -231,9 +231,13 @@
   if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
-    __ Move(rcx, Handle<Map>(transition));
-    Handle<Code> ic(Builtins::builtin(storage_extend));
-    __ Jump(ic, RelocInfo::CODE_TARGET);
+    __ pop(scratch);  // Return address.
+    __ push(receiver_reg);
+    __ Push(Handle<Map>(transition));
+    __ push(rax);
+    __ push(scratch);
+    __ TailCallRuntime(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
     return;
   }
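
For reference, a sketch of the stack as the shared IC utility sees it
immediately after the pushes above (offsets relative to rsp at the tail
call; layout inferred from the push order in this hunk):

    // rsp[0]  : return address (the re-pushed scratch register)
    // rsp[8]  : value to store (rax)
    // rsp[16] : transition map
    // rsp[24] : receiver
    // TailCallRuntime is invoked with 3 arguments and a result size of 1.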
 
@@ -314,38 +318,39 @@
 
 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                             Register receiver,
-                                            Register scratch,
+                                            Register scratch1,
+                                            Register scratch2,
                                             Label* miss) {
-  Label load_length, check_wrapper;
+  Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch register.
-  GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
 
   // Load length directly from the string.
-  __ bind(&load_length);
   __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
   __ Integer32ToSmi(rax, rax);
   __ ret(0);
 
   // Check if the object is a JSValue wrapper.
   __ bind(&check_wrapper);
-  __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+  __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
   __ j(not_equal, miss);
 
   // Check if the wrapped value is a string and load the length
   // directly if it is.
-  __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, receiver, scratch, miss, miss);
-  __ jmp(&load_length);
+  __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+  __ movl(rax, FieldOperand(scratch2, String::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
 }
 
 
-template <class Pushable>
 static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
                                                    Register receiver,
                                                    Register holder,
-                                                   Pushable name,
+                                                   Register name,
                                                    JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
@@ -394,7 +399,7 @@
       stub_compiler->CheckPrototypes(object, receiver, holder,
                                      scratch1, scratch2, name, miss);
 
-  if (lookup->IsValid() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     compiler->CompileCacheable(masm,
                                stub_compiler,
                                receiver,
@@ -430,7 +435,7 @@
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
-    AccessorInfo* callback = 0;
+    AccessorInfo* callback = NULL;
     bool optimize = false;
     // So far the most popular follow ups for interceptor loads are FIELD
     // and CALLBACKS, so inline only them, other cases may be added
@@ -553,8 +558,8 @@
 
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
-  explicit CallInterceptorCompiler(const ParameterCount& arguments)
-      : arguments_(arguments), argc_(arguments.immediate()) {}
+  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+      : arguments_(arguments), name_(name) {}
 
   void CompileCacheable(MacroAssembler* masm,
                         StubCompiler* stub_compiler,
@@ -584,18 +589,20 @@
       return;
     }
 
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
     __ EnterInternalFrame();
-    __ push(holder);  // save the holder
+    __ push(holder);  // Save the holder.
+    __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(
-        masm,
-        receiver,
-        holder,
-        // Under EnterInternalFrame this refers to name.
-        Operand(rbp, (argc_ + 3) * kPointerSize),
-        holder_obj);
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
 
-    __ pop(receiver);  // restore holder
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
     __ LeaveInternalFrame();
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -607,22 +614,8 @@
                                    scratch2,
                                    name,
                                    miss_label);
-    if (lookup->holder()->IsGlobalObject()) {
-      __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
-      __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
-      __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
-    }
 
-    ASSERT(function->is_compiled());
-    // Get the function and setup the context.
-    __ Move(rdi, Handle<JSFunction>(function));
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-    // Jump to the cached code (tail call).
-    Handle<Code> code(function->code());
-    ParameterCount expected(function->shared()->formal_parameter_count());
-    __ InvokeCode(code, expected, arguments_,
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+    __ InvokeFunction(function, arguments_, JUMP_FUNCTION);
 
     __ bind(&invoke);
   }
@@ -634,27 +627,26 @@
                       JSObject* holder_obj,
                       Label* miss_label) {
     __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
 
     PushInterceptorArguments(masm,
                              receiver,
                              holder,
-                             Operand(rbp, (argc_ + 3) * kPointerSize),
+                             name_,
                              holder_obj);
 
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
-    __ movq(rax, Immediate(5));
-    __ movq(rbx, ref);
+    __ CallExternalReference(
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+        5);
 
-    CEntryStub stub(1);
-    __ CallStub(&stub);
-
+    __ pop(name_);
     __ LeaveInternalFrame();
   }
 
  private:
   const ParameterCount& arguments_;
-  int argc_;
+  Register name_;
 };
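
Passing the name in a register replaces the old rbp-relative operand,
which only denoted the name while execution was inside the internal
frame; the register is saved and restored explicitly instead, and it
lets PushInterceptorArguments take a plain Register rather than a
template parameter. The shape of the change, side by side:

    // Before: name addressed through the internal frame (subtle).
    //   PushInterceptorArguments(masm, receiver, holder,
    //                            Operand(rbp, (argc_ + 3) * kPointerSize),
    //                            holder_obj);
    // After: name lives in a register and is preserved explicitly.
    //   __ push(name_);
    //   PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
    //   ...
    //   __ pop(name_);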
 
 
@@ -669,14 +661,14 @@
                                               String* name,
                                               StubCompiler::CheckType check) {
   // ----------- S t a t e -------------
-  // -----------------------------------
-  // rsp[0] return address
-  // rsp[8] argument argc
-  // rsp[16] argument argc - 1
+  // rcx                 : function name
+  // rsp[0]              : return address
+  // rsp[8]              : argument argc
+  // rsp[16]             : argument argc - 1
   // ...
-  // rsp[argc * 8] argument 1
-  // rsp[(argc + 1) * 8] argument 0 = receiver
-  // rsp[(argc + 2) * 8] function name
+  // rsp[argc * 8]       : argument 1
+  // rsp[(argc + 1) * 8] : argument 0 = receiver
+  // -----------------------------------
 
   Label miss;
 
@@ -697,7 +689,7 @@
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
       CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rcx, name, &miss);
+                      rbx, rax, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -713,13 +705,13 @@
         __ jmp(&miss);
       } else {
         // Check that the object is a two-byte string or a symbol.
-        __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+        __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            rcx);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                                            rax);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, name, &miss);
       }
       break;
@@ -732,14 +724,14 @@
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(rdx, &fast);
-        __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+        __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
         __ j(not_equal, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::NUMBER_FUNCTION_INDEX,
-                                            rcx);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                                            rax);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, name, &miss);
       }
       break;
@@ -760,8 +752,8 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::BOOLEAN_FUNCTION_INDEX,
-                                            rcx);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                                            rax);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, name, &miss);
       }
       break;
@@ -769,7 +761,7 @@
 
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
       CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rcx, name, &miss);
+                      rbx, rax, name, &miss);
       // Make sure object->HasFastElements().
       // Get the elements array of the object.
       __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -783,16 +775,7 @@
       UNREACHABLE();
   }
 
-  // Get the function and setup the context.
-  __ Move(rdi, Handle<JSFunction>(function));
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-  // Jump to the cached code (tail call).
-  ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -808,19 +791,19 @@
 }
 
 
-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
                                            JSObject* holder,
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
-  // -----------------------------------
-  // rsp[0] return address
-  // rsp[8] argument argc
-  // rsp[16] argument argc - 1
+  // rcx                 : function name
+  // rsp[0]              : return address
+  // rsp[8]              : argument argc
+  // rsp[16]             : argument argc - 1
   // ...
-  // rsp[argc * 8] argument 1
-  // rsp[(argc + 1) * 8] argument 0 = receiver
-  // rsp[(argc + 2) * 8] function name
+  // rsp[argc * 8]       : argument 1
+  // rsp[(argc + 1) * 8] : argument 0 = receiver
+  // -----------------------------------
   Label miss;
 
   // Get the receiver from the stack.
@@ -831,9 +814,7 @@
   __ JumpIfSmi(rdx, &miss);
 
   // Do the right check and compute the holder register.
-  Register reg =
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rcx, name, &miss);
+  Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
 
   GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
 
@@ -862,10 +843,17 @@
 }
 
 
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
+  // rcx                 : function name
+  // rsp[0]              : return address
+  // rsp[8]              : argument argc
+  // rsp[16]             : argument argc - 1
+  // ...
+  // rsp[argc * 8]       : argument 1
+  // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
   Label miss;
 
@@ -878,17 +866,17 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(arguments());
+  CallInterceptorCompiler compiler(arguments(), rcx);
   CompileLoadInterceptor(&compiler,
                          this,
                          masm(),
-                         JSObject::cast(object),
+                         object,
                          holder,
                          name,
                          &lookup,
                          rdx,
                          rbx,
-                         rcx,
+                         rdi,
                          &miss);
 
   // Restore receiver.
@@ -920,7 +908,6 @@
 }
 
 
-
 Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                             GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
@@ -928,13 +915,13 @@
                                             String* name) {
   // ----------- S t a t e -------------
-  // -----------------------------------
-  // rsp[0] return address
-  // rsp[8] argument argc
-  // rsp[16] argument argc - 1
+  // rcx                 : function name
+  // rsp[0]              : return address
+  // rsp[8]              : argument argc
+  // rsp[16]             : argument argc - 1
   // ...
-  // rsp[argc * 8] argument 1
-  // rsp[(argc + 1) * 8] argument 0 = receiver
-  // rsp[(argc + 2) * 8] function name
+  // rsp[argc * 8]       : argument 1
+  // rsp[(argc + 1) * 8] : argument 0 = receiver
+  // -----------------------------------
   Label miss;
 
   // Get the number of arguments.
@@ -951,7 +938,7 @@
   }
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, rdx, holder, rbx, rcx, name, &miss);
+  CheckPrototypes(object, rdx, holder, rbx, rax, name, &miss);
 
   // Get the value from the cell.
   __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
@@ -965,12 +952,12 @@
     // function can all use this call IC. Before we load through the
     // function, we have to verify that it still is a function.
     __ JumpIfSmi(rdi, &miss);
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
     __ j(not_equal, &miss);
 
     // Check the shared function info. Make sure it hasn't changed.
-    __ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
-    __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
+    __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
+    __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
     __ j(not_equal, &miss);
   } else {
     __ Cmp(rdi, Handle<JSFunction>(function));
@@ -1325,7 +1312,7 @@
   __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+  GenerateLoadStringLength(masm(), rcx, rdx, rbx, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1397,9 +1384,8 @@
   // -----------------------------------
   Label miss;
 
-  // Generate store field code.  Trashes the name register.
+  // Generate store field code.  Preserves receiver and name on jump to miss.
   GenerateStoreField(masm(),
-                     Builtins::StoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
@@ -1408,7 +1394,6 @@
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ Move(rcx, Handle<String>(name));  // restore name
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
@@ -1550,16 +1535,15 @@
   __ Cmp(rcx, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  // Get the object from the stack.
-  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
 
-  // Generate store field code.  Trashes the name register.
+  // Generate store field code.  Preserves receiver and name on jump to miss.
   GenerateStoreField(masm(),
-                     Builtins::KeyedStoreIC_ExtendStorage,
                      object,
                      index,
                      transition,
-                     rbx, rcx, rdx,
+                     rdx, rcx, rbx,
                      &miss);
 
   // Handle store cache miss.
@@ -1665,7 +1649,11 @@
                                        Register holder_reg,
                                        Register scratch,
                                        String* name,
+                                       int save_at_depth,
                                        Label* miss) {
+  // TODO(602): support object saving.
+  ASSERT(save_at_depth == kInvalidProtoDepth);
+
   // Check that the maps haven't changed.
   Register result =
       __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index cb93d5d..a0e883c 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -45,7 +45,7 @@
     : elements_(parameter_count() + local_count() + kPreallocatedElements),
       stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
   for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement());
+    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
   }
   for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
     register_locations_[i] = kIllegalIndex;
@@ -193,25 +193,25 @@
 }
 
 
-void VirtualFrame::EmitPush(Register reg) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(reg);
 }
 
 
-void VirtualFrame::EmitPush(const Operand& operand) {
+void VirtualFrame::EmitPush(const Operand& operand, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(operand);
 }
 
 
-void VirtualFrame::EmitPush(Immediate immediate) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ push(immediate);
 }
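
The NumberInfo argument lets the code generator record what it statically
knows about a pushed value. A minimal, hypothetical fragment (frame_ is
the usual CodeGenerator member; the load mirrors GenerateLoadStringLength
in the stub cache above):

    // Hypothetical codegen fragment, not part of this patch.
    __ movl(rax, FieldOperand(rcx, String::kLengthOffset));
    __ Integer32ToSmi(rax, rax);              // rax now holds a smi...
    frame_->EmitPush(rax, NumberInfo::kSmi);  // ...and the frame records it.
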
@@ -219,7 +219,7 @@
 
 void VirtualFrame::EmitPush(Smi* smi_value) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(NumberInfo::kSmi));
   stack_pointer_++;
   __ Push(smi_value);
 }
@@ -227,15 +227,21 @@
 
 void VirtualFrame::EmitPush(Handle<Object> value) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  NumberInfo::Type info = NumberInfo::kUnknown;
+  if (value->IsSmi()) {
+    info = NumberInfo::kSmi;
+  } else if (value->IsHeapNumber()) {
+    info = NumberInfo::kHeapNumber;
+  }
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ Push(value);
 }
 
 
-void VirtualFrame::EmitPush(Heap::RootListIndex index) {
+void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo::Type info) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement());
+  elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ PushRoot(index);
 }
@@ -305,10 +311,14 @@
   // Set the new backing element.
   if (elements_[new_backing_index].is_synced()) {
     elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+        FrameElement::RegisterElement(backing_reg,
+                                      FrameElement::SYNCED,
+                                      original.number_info());
   } else {
     elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+        FrameElement::RegisterElement(backing_reg,
+                                      FrameElement::NOT_SYNCED,
+                                      original.number_info());
   }
   // Update the other copies.
   for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -339,7 +349,8 @@
       ASSERT(fresh.is_valid());
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED);
+                                        FrameElement::NOT_SYNCED,
+                                        original.number_info());
       Use(fresh.reg(), element_count());
       elements_.Add(new_element);
       __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
@@ -480,10 +491,12 @@
   for (int i = 0; i < element_count(); i++) {
     FrameElement element = elements_[i];
 
+    // In all cases we have to reset the number type information
+    // to unknown for a mergeable frame because of incoming back edges.
     if (element.is_constant() || element.is_copy()) {
       if (element.is_synced()) {
         // Just spill.
-        elements_[i] = FrameElement::MemoryElement();
+        elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
       } else {
         // Allocate to a register.
         FrameElement backing_element;  // Invalid if not a copy.
@@ -494,7 +507,8 @@
         ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED);
+                                          FrameElement::NOT_SYNCED,
+                                          NumberInfo::kUnknown);
         Use(fresh.reg(), i);
 
         // Emit a move.
@@ -523,6 +537,7 @@
       // The copy flag is not relied on before the end of this loop,
       // including when registers are spilled.
       elements_[i].clear_copied();
+      elements_[i].set_number_info(NumberInfo::kUnknown);
     }
   }
 }
@@ -728,6 +743,14 @@
   int index = element_count();
   ASSERT(element.is_valid());
 
+  // Get the number type information of the result.
+  NumberInfo::Type info;
+  if (!element.is_copy()) {
+    info = element.number_info();
+  } else {
+    info = elements_[element.index()].number_info();
+  }
+
   bool pop_needed = (stack_pointer_ == index);
   if (pop_needed) {
     stack_pointer_--;
@@ -735,6 +758,7 @@
       Result temp = cgen()->allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ pop(temp.reg());
+      temp.set_number_info(info);
       return temp;
     }
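
The recorded type then survives a round trip through the frame; a
hypothetical fragment (assumes the popped element is the one pushed here,
still backed by memory, and that a register is free for the pop):

    // Hypothetical codegen fragment, not part of this patch.
    frame_->EmitPush(rax, NumberInfo::kSmi);        // Record: smi.
    Result top = frame_->Pop();                     // Pop forwards it...
    ASSERT(top.number_info() == NumberInfo::kSmi);  // ...into the Result.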
 
@@ -762,14 +786,16 @@
     ASSERT(temp.is_valid());
     Use(temp.reg(), index);
     FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+        FrameElement::RegisterElement(temp.reg(),
+                                      FrameElement::SYNCED,
+                                      element.number_info());
     // Preserve the copy flag on the element.
     if (element.is_copied()) new_element.set_copied();
     elements_[index] = new_element;
     __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
-    return Result(temp.reg());
+    return Result(temp.reg(), info);
   } else if (element.is_register()) {
-    return Result(element.reg());
+    return Result(element.reg(), info);
   } else {
     ASSERT(element.is_constant());
     return Result(element.handle());
@@ -969,6 +995,17 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+  PrepareForCall(0, 0);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ DebugBreak();
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+}
+#endif
+
+
 Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
   // Name and receiver are on the top of the frame.  The IC expects
   // name in rcx and receiver on the stack.  It does not drop the
@@ -996,7 +1033,6 @@
   // expects value in rax and key and receiver on the stack.  It does
   // not drop the key and receiver.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  // TODO(1222589): Make the IC grab the values from the stack.
   Result value = Pop();
   PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
   value.ToRegister(rax);
@@ -1008,14 +1044,17 @@
 Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
                                 int arg_count,
                                 int loop_nesting) {
-  // Arguments, receiver, and function name are on top of the frame.
-  // The IC expects them on the stack.  It does not drop the function
-  // name slot (but it does drop the rest).
+  // The function name, arguments, and receiver are on top of the frame.
+  // The IC expects the name in rcx and the arguments and receiver on the
+  // stack, and drops all of them.
   InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
+  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
+  // Spill args and receiver; the call drops them.
-  PrepareForCall(arg_count + 2, arg_count + 1);
+  PrepareForCall(arg_count + 1, arg_count + 1);
+  name.ToRegister(rcx);
+  name.Unuse();
   return RawCallCodeObject(ic, mode);
 }
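
Under the new protocol the name is the frame's top element and travels to
the IC in rcx; a hypothetical call sequence (name_handle is a placeholder,
and argc excludes the receiver as documented in the header):

    // Hypothetical codegen fragment, not part of this patch.  The frame
    // already holds the receiver and argc arguments; push the name last
    // so CallCallIC can pop it into rcx.
    frame_->Push(name_handle);
    Result answer =
        frame_->CallCallIC(RelocInfo::CODE_TARGET, argc, loop_nesting);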
 
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 8e3e40f..c9aa799 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -28,6 +28,7 @@
 #ifndef V8_X64_VIRTUAL_FRAME_X64_H_
 #define V8_X64_VIRTUAL_FRAME_X64_H_
 
+#include "number-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
 
@@ -81,7 +82,8 @@
   MacroAssembler* masm() { return cgen()->masm(); }
 
   // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index);
+  FrameElement CopyElementAt(int index,
+      NumberInfo::Type info = NumberInfo::kUninitialized);
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -321,6 +323,10 @@
   Result CallRuntime(Runtime::Function* f, int arg_count);
   Result CallRuntime(Runtime::FunctionId id, int arg_count);
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  void DebugBreak();
+#endif
+
   // Invoke builtin given the number of arguments it expects on (and
   // removes from) the stack.
   Result InvokeBuiltin(Builtins::JavaScript id,
@@ -343,9 +349,9 @@
   // of the frame.  Key and receiver are not dropped.
   Result CallKeyedStoreIC();
 
-  // Call call IC.  Arguments, receiver, and function name are found
-  // on top of the frame.  Function name slot is not dropped.  The
-  // argument count does not include the receiver.
+  // Call call IC.  Function name, arguments, and receiver are found on top
+  // of the frame and dropped by the call.
+  // The argument count does not include the receiver.
   Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
 
   // Allocate and call JS function as constructor.  Arguments,
@@ -376,16 +382,20 @@
 
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
-  void EmitPush(Register reg);
-  void EmitPush(const Operand& operand);
-  void EmitPush(Heap::RootListIndex index);
-  void EmitPush(Immediate immediate);
+  void EmitPush(Register reg,
+                NumberInfo::Type info = NumberInfo::kUnknown);
+  void EmitPush(const Operand& operand,
+                NumberInfo::Type info = NumberInfo::kUnknown);
+  void EmitPush(Heap::RootListIndex index,
+                NumberInfo::Type info = NumberInfo::kUnknown);
+  void EmitPush(Immediate immediate,
+                NumberInfo::Type info = NumberInfo::kUnknown);
   void EmitPush(Smi* value);
   // Uses kScratchRegister, emits appropriate relocation info.
   void EmitPush(Handle<Object> value);
 
   // Push an element on the virtual frame.
-  void Push(Register reg);
+  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
   void Push(Handle<Object> value);
   void Push(Smi* value) { Push(Handle<Object>(value)); }
 
@@ -393,7 +403,7 @@
   // frame).
   void Push(Result* result) {
     if (result->is_register()) {
-      Push(result->reg());
+      Push(result->reg(), result->number_info());
     } else {
       ASSERT(result->is_constant());
       Push(result->handle());