Update V8 to r5532 as required by WebKit r68651

Change-Id: I5f75eeffbf64b30dd5080348528d277f293490ad
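
Note on the api.cc hunks below: an expression such as
obj->set_callback(*FromCData(callback)) is GC-unsafe, because C++ leaves
unspecified whether the obj handle is dereferenced before or after
FromCData() runs, and FromCData() can allocate, triggering a GC that
moves the object behind the handle. SET_FIELD_WRAPPED (a do { } while
(false) macro so it stays a single statement after an unbraced if) and
the set_map() reordering further down both force the allocating call to
finish before any handle is dereferenced. A self-contained sketch of the
hazard with toy stand-ins for V8's Handle and moving GC (nothing below
is real V8 code):

  #include <cstdio>

  struct Object { int field; };

  // A Handle is an indirection the GC keeps up to date; a raw Object*
  // taken from it is only valid until the next allocation.
  struct Handle {
    Object** location;
    Object* operator->() const { return *location; }
    Object* get() const { return *location; }
  };

  Object* cell;                 // the GC-managed slot behind the handle
  void AllocateAndMaybeGC() {   // stands in for FromCData()
    static Object moved = {42};
    cell = &moved;              // "moves" the object
  }

  int main() {
    static Object original = {0};
    cell = &original;
    Handle obj = {&cell};

    Object* raw = obj.get();    // unsafe: raw pointer fetched first...
    AllocateAndMaybeGC();       // ...then the object moves
    raw->field = 1;             // the write lands in the stale copy

    AllocateAndMaybeGC();       // safe order: allocate first,
    obj->field = 1;             // then dereference (SET_FIELD_WRAPPED's order)
    std::printf("field = %d\n", obj->field);
    return 0;
  }
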
diff --git a/src/api.cc b/src/api.cc
index 0d01fcc..5f480c9 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -767,6 +767,12 @@
 }
 
 
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do {  \
+    i::Handle<i::Object> proxy = FromCData(cdata);  \
+    (obj)->setter(*proxy);                          \
+  } while (false)
+
+
 void FunctionTemplate::SetCallHandler(InvocationCallback callback,
                                       v8::Handle<Value> data) {
   if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
@@ -776,7 +782,7 @@
       i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
   i::Handle<i::CallHandlerInfo> obj =
       i::Handle<i::CallHandlerInfo>::cast(struct_obj);
-  obj->set_callback(*FromCData(callback));
+  SET_FIELD_WRAPPED(obj, set_callback, callback);
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
   Utils::OpenHandle(this)->set_call_code(*obj);
@@ -792,8 +798,8 @@
       v8::PropertyAttribute attributes) {
   i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
   ASSERT(getter != NULL);
-  obj->set_getter(*FromCData(getter));
-  obj->set_setter(*FromCData(setter));
+  SET_FIELD_WRAPPED(obj, set_getter, getter);
+  SET_FIELD_WRAPPED(obj, set_setter, setter);
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
   obj->set_name(*Utils::OpenHandle(*name));
@@ -877,11 +883,13 @@
       i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
   i::Handle<i::InterceptorInfo> obj =
       i::Handle<i::InterceptorInfo>::cast(struct_obj);
-  if (getter != 0) obj->set_getter(*FromCData(getter));
-  if (setter != 0) obj->set_setter(*FromCData(setter));
-  if (query != 0) obj->set_query(*FromCData(query));
-  if (remover != 0) obj->set_deleter(*FromCData(remover));
-  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+
+  if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+  if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+  if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+  if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+  if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
   Utils::OpenHandle(this)->set_named_property_handler(*obj);
@@ -905,11 +913,13 @@
       i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
   i::Handle<i::InterceptorInfo> obj =
       i::Handle<i::InterceptorInfo>::cast(struct_obj);
-  if (getter != 0) obj->set_getter(*FromCData(getter));
-  if (setter != 0) obj->set_setter(*FromCData(setter));
-  if (query != 0) obj->set_query(*FromCData(query));
-  if (remover != 0) obj->set_deleter(*FromCData(remover));
-  if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator));
+
+  if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+  if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+  if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+  if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+  if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
   Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
@@ -928,7 +938,7 @@
       i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
   i::Handle<i::CallHandlerInfo> obj =
       i::Handle<i::CallHandlerInfo>::cast(struct_obj);
-  obj->set_callback(*FromCData(callback));
+  SET_FIELD_WRAPPED(obj, set_callback, callback);
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
   Utils::OpenHandle(this)->set_instance_call_handler(*obj);
@@ -1043,8 +1053,10 @@
       i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
   i::Handle<i::AccessCheckInfo> info =
       i::Handle<i::AccessCheckInfo>::cast(struct_info);
-  info->set_named_callback(*FromCData(named_callback));
-  info->set_indexed_callback(*FromCData(indexed_callback));
+
+  SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
+  SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
+
   if (data.IsEmpty()) data = v8::Undefined();
   info->set_data(*Utils::OpenHandle(*data));
 
@@ -2646,8 +2658,9 @@
     return;
   }
   i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
-  self->set_map(
-      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
+  i::Handle<i::Map> slow_map =
+      i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
+  self->set_map(*slow_map);
   self->set_elements(*pixels);
 }
 
@@ -2701,8 +2714,9 @@
   }
   i::Handle<i::ExternalArray> array =
       i::Factory::NewExternalArray(length, array_type, data);
-  self->set_map(
-      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
+  i::Handle<i::Map> slow_map =
+      i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
+  self->set_map(*slow_map);
   self->set_elements(*array);
 }
 
@@ -4433,7 +4447,7 @@
 
 unsigned CpuProfileNode::GetCallUid() const {
   IsDeadCheck("v8::CpuProfileNode::GetCallUid");
-  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid();
+  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
 }
 
 
@@ -4739,6 +4753,23 @@
 }
 
 
+void HeapSnapshot::Serialize(OutputStream* stream,
+                             HeapSnapshot::SerializationFormat format) const {
+  IsDeadCheck("v8::HeapSnapshot::Serialize");
+  ApiCheck(format == kJSON,
+           "v8::HeapSnapshot::Serialize",
+           "Unknown serialization format");
+  ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
+           "v8::HeapSnapshot::Serialize",
+           "Unsupported output encoding");
+  ApiCheck(stream->GetChunkSize() > 0,
+           "v8::HeapSnapshot::Serialize",
+           "Invalid stream chunk size");
+  i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
+  serializer.Serialize(stream);
+}
+
+
 int HeapProfiler::GetSnapshotsCount() {
   IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
   return i::HeapProfiler::GetSnapshotsCount();
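
The HeapSnapshot::Serialize() added above streams the snapshot as ASCII
JSON through a caller-supplied v8::OutputStream, in chunks of
GetChunkSize() bytes. A sketch of a client, assuming the v8-profiler.h
OutputStream interface this code validates against (kJSON format, kAscii
encoding, positive chunk size); FileOutputStream and its chunk size are
illustrative, not part of the patch:

  #include <v8-profiler.h>
  #include <cstdio>

  class FileOutputStream : public v8::OutputStream {
   public:
    explicit FileOutputStream(FILE* out) : out_(out) {}
    virtual void EndOfStream() { std::fflush(out_); }
    virtual int GetChunkSize() { return 4096; }  // must be > 0
    virtual WriteResult WriteAsciiChunk(char* data, int size) {
      size_t written = std::fwrite(data, 1, static_cast<size_t>(size), out_);
      return written == static_cast<size_t>(size) ? kContinue : kAbort;
    }
   private:
    FILE* out_;
  };

  void DumpSnapshot(const v8::HeapSnapshot* snapshot, FILE* out) {
    FileOutputStream stream(out);  // default encoding is kAscii
    snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  }
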
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index be9aa92..1c4fd60 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -69,13 +69,13 @@
 //
 // Core register
 struct Register {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(Register reg) const { return code_ == reg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
@@ -110,17 +110,17 @@
 
 // Single word VFP register.
 struct SwVfpRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 32; }
-  bool is(SwVfpRegister reg) const  { return code_ == reg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 32; }
+  bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
-  void split_code(int* vm, int* m) const  {
+  void split_code(int* vm, int* m) const {
     ASSERT(is_valid());
     *m = code_ & 0x1;
     *vm = code_ >> 1;
@@ -133,31 +133,31 @@
 // Double word VFP register.
 struct DwVfpRegister {
   // Supporting d0 to d15, can be later extended to d31.
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(DwVfpRegister reg) const  { return code_ == reg.code_; }
-  SwVfpRegister low() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+  SwVfpRegister low() const {
     SwVfpRegister reg;
     reg.code_ = code_ * 2;
 
     ASSERT(reg.is_valid());
     return reg;
   }
-  SwVfpRegister high() const  {
+  SwVfpRegister high() const {
     SwVfpRegister reg;
     reg.code_ = (code_ * 2) + 1;
 
     ASSERT(reg.is_valid());
     return reg;
   }
-  int code() const  {
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
-  void split_code(int* vm, int* m) const  {
+  void split_code(int* vm, int* m) const {
     ASSERT(is_valid());
     *m = (code_ & 0x10) >> 4;
     *vm = code_ & 0x0F;
@@ -222,13 +222,13 @@
 
 // Coprocessor register
 struct CRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(CRegister creg) const  { return code_ == creg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(CRegister creg) const { return code_ == creg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
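
The split_code() helpers reformatted above implement ARM's split VFP
register encoding: a single-precision s<n> is encoded as a 4-bit field
plus a low-bit extension (Vm = n >> 1, M = n & 1), while the
double-precision split keeps the low four bits in the field with the
high bit in M (anticipating d16-d31). A standalone round-trip check of
exactly those bit operations:

  #include <cassert>

  int EncodeSingle(int code, int* m) { *m = code & 0x1; return code >> 1; }
  int DecodeSingle(int vm, int m) { return (vm << 1) | m; }

  int EncodeDouble(int code, int* m) { *m = (code & 0x10) >> 4; return code & 0x0F; }
  int DecodeDouble(int vm, int m) { return (m << 4) | vm; }

  int main() {
    for (int code = 0; code < 32; ++code) {
      int m = 0;
      int vm = EncodeSingle(code, &m);
      assert(DecodeSingle(vm, m) == code);
      vm = EncodeDouble(code, &m);
      assert(DecodeDouble(vm, m) == code);
    }
    return 0;
  }
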
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 8b21558..cf2f426 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -521,7 +521,11 @@
 
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function) {
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
 
@@ -530,9 +534,6 @@
   __ push(r0);  // Smi-tagged arguments count.
   __ push(r1);  // Constructor function.
 
-  // Use r7 for holding undefined which is used in several places below.
-  __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
   // Try to allocate the object without transitioning into C code. If any of the
   // preconditions is not met, the code bails out to the runtime call.
   Label rt_call, allocated;
@@ -549,7 +550,6 @@
 
     // Load the initial map and verify that it is in fact a map.
     // r1: constructor function
-    // r7: undefined value
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ tst(r2, Operand(kSmiTagMask));
     __ b(eq, &rt_call);
@@ -561,14 +561,35 @@
     // instance type would be JS_FUNCTION_TYPE.
     // r1: constructor function
     // r2: initial map
-    // r7: undefined value
     __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
     __ b(eq, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+          FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+      __ ldrb(r4, constructor_count);
+      __ sub(r4, r4, Operand(1), SetCC);
+      __ strb(r4, constructor_count);
+      __ b(ne, &allocate);
+
+      __ Push(r1, r2);
+
+      __ push(r1);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(r2);
+      __ pop(r1);
+
+      __ bind(&allocate);
+    }
+
     // Now allocate the JSObject on the heap.
     // r1: constructor function
     // r2: initial map
-    // r7: undefined value
     __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
     __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
 
@@ -578,7 +599,6 @@
     // r2: initial map
     // r3: object size
     // r4: JSObject (not tagged)
-    // r7: undefined value
     __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
     __ mov(r5, r4);
     ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -588,16 +608,21 @@
     ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
     __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
 
-    // Fill all the in-object properties with undefined.
+    // Fill all the in-object properties with the appropriate filler.
     // r1: constructor function
     // r2: initial map
     // r3: object size (in words)
     // r4: JSObject (not tagged)
     // r5: First in-object property of JSObject (not tagged)
-    // r7: undefined value
     __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
     ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
     { Label loop, entry;
+      if (count_constructions) {
+        // To allow for truncation.
+        __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      }
       __ b(&entry);
       __ bind(&loop);
       __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
@@ -617,7 +642,6 @@
     // r1: constructor function
     // r4: JSObject
     // r5: start of next object (not tagged)
-    // r7: undefined value
     __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
     // The instance size field contains both pre-allocated property fields and
     // in-object properties.
@@ -637,7 +661,6 @@
     // r3: number of elements in properties array
     // r4: JSObject
     // r5: start of next object
-    // r7: undefined value
     __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
     __ AllocateInNewSpace(
         r0,
@@ -652,7 +675,6 @@
     // r3: number of elements in properties array
     // r4: JSObject
     // r5: FixedArray (not tagged)
-    // r7: undefined value
     __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
     __ mov(r2, r5);
     ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -667,10 +689,16 @@
     // r3: number of elements in properties array
     // r4: JSObject
     // r5: FixedArray (not tagged)
-    // r7: undefined
     __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
     ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
     { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+        __ cmp(r7, r8);
+        __ Assert(eq, "Undefined value not loaded.");
+      }
       __ b(&entry);
       __ bind(&loop);
       __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
@@ -822,13 +850,18 @@
 }
 
 
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false);
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true);
+  Generate_JSConstructStubHelper(masm, true, false);
 }
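
The countdown stub above implements in-object slack tracking: every
construction decrements the SharedFunctionInfo construction count, and
when it reaches zero the stub calls Runtime::kFinalizeInstanceSize once
(the call also replaces the stub) to shrink the instance size to what is
actually used. Until then, unused in-object fields are filled with the
one-pointer filler map rather than undefined, so the object tail can be
truncated safely. A control-flow sketch in plain C++; every name below
is a stand-in for the runtime pieces the assembly touches:

  #include <cstdlib>

  struct SharedInfo {
    unsigned char construction_count;  // kConstructionCountOffset
    int instance_size_in_words;
  };

  // Stand-in for Runtime::kFinalizeInstanceSize: shrinks the map's
  // instance size to the slack actually used and swaps in the generic stub.
  static void FinalizeInstanceSize(SharedInfo* shared) { (void)shared; }

  static void* ConstructWithCountdown(SharedInfo* shared) {
    if (--shared->construction_count == 0) {
      FinalizeInstanceSize(shared);  // happens once per function
    }
    void* obj = std::calloc(shared->instance_size_in_words, sizeof(void*));
    // The real stub fills unused in-object fields with the one-pointer
    // filler map here (not undefined); that is what makes truncation legal.
    return obj;
  }
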
 
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index fa93030..8f801cf 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -930,6 +930,24 @@
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
 
+  if (include_smi_compare_) {
+    Label not_two_smis, smi_done;
+    __ orr(r2, r1, r0);
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(ne, &not_two_smis);
+    __ sub(r0, r1, r0, SetCC);
+    __ b(vc, &smi_done);
+    // Correct the sign in case of overflow.
+    __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+    __ bind(&smi_done);
+    __ Ret();
+    __ bind(&not_two_smis);
+  } else if (FLAG_debug_code) {
+    __ orr(r2, r1, r0);
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(nz, "CompareStub: unexpected smi operands.");
+  }
+
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
@@ -2288,7 +2306,7 @@
   __ push(r0);
   __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
 
-  __ StubReturn(1);
+  __ Ret();
 }
 
 
@@ -2299,32 +2317,37 @@
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
   if (op_ == Token::SUB) {
-    // Check whether the value is a smi.
-    Label try_float;
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(ne, &try_float);
+    if (include_smi_code_) {
+      // Check whether the value is a smi.
+      Label try_float;
+      __ tst(r0, Operand(kSmiTagMask));
+      __ b(ne, &try_float);
 
-    // Go slow case if the value of the expression is zero
-    // to make sure that we switch between 0 and -0.
-    if (negative_zero_ == kStrictNegativeZero) {
-      // If we have to check for zero, then we can check for the max negative
-      // smi while we are at it.
-      __ bic(ip, r0, Operand(0x80000000), SetCC);
-      __ b(eq, &slow);
-      __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
-      __ StubReturn(1);
-    } else {
-      // The value of the expression is a smi and 0 is OK for -0.  Try
-      // optimistic subtraction '0 - value'.
-      __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
-      __ StubReturn(1, vc);
-      // We don't have to reverse the optimistic neg since the only case
-      // where we fall through is the minimum negative Smi, which is the case
-      // where the neg leaves the register unchanged.
-      __ jmp(&slow);  // Go slow on max negative Smi.
+      // Go slow case if the value of the expression is zero
+      // to make sure that we switch between 0 and -0.
+      if (negative_zero_ == kStrictNegativeZero) {
+        // If we have to check for zero, then we can check for the max negative
+        // smi while we are at it.
+        __ bic(ip, r0, Operand(0x80000000), SetCC);
+        __ b(eq, &slow);
+        __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+        __ Ret();
+      } else {
+        // The value of the expression is a smi and 0 is OK for -0.  Try
+        // optimistic subtraction '0 - value'.
+        __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
+        __ Ret(vc);
+        // We don't have to reverse the optimistic neg since the only case
+        // where we fall through is the minimum negative Smi, which is the case
+        // where the neg leaves the register unchanged.
+        __ jmp(&slow);  // Go slow on max negative Smi.
+      }
+      __ bind(&try_float);
+    } else if (FLAG_debug_code) {
+      __ tst(r0, Operand(kSmiTagMask));
+      __ Assert(ne, "Unexpected smi operand.");
     }
 
-    __ bind(&try_float);
     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
     __ cmp(r1, heap_number_map);
@@ -2344,6 +2367,19 @@
       __ mov(r0, Operand(r1));
     }
   } else if (op_ == Token::BIT_NOT) {
+    if (include_smi_code_) {
+      Label non_smi;
+      __ BranchOnNotSmi(r0, &non_smi);
+      __ mvn(r0, Operand(r0));
+      // Bit-clear inverted smi-tag.
+      __ bic(r0, r0, Operand(kSmiTagMask));
+      __ Ret();
+      __ bind(&non_smi);
+    } else if (FLAG_debug_code) {
+      __ tst(r0, Operand(kSmiTagMask));
+      __ Assert(ne, "Unexpected smi operand.");
+    }
+
     // Check if the operand is a heap number.
     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -2391,7 +2427,7 @@
   }
 
   __ bind(&done);
-  __ StubReturn(1);
+  __ Ret();
 
   // Handle the slow case by jumping to the JavaScript builtin.
   __ bind(&slow);
@@ -3499,6 +3535,11 @@
     include_number_compare_name = "_NO_NUMBER";
   }
 
+  const char* include_smi_compare_name = "";
+  if (!include_smi_compare_) {
+    include_smi_compare_name = "_NO_SMI";
+  }
+
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s%s%s",
+               "CompareStub_%s%s%s%s%s%s%s",
                cc_name,
@@ -3506,7 +3547,8 @@
                rhs_name,
                strict_name,
                never_nan_nan_name,
-               include_number_compare_name);
+               include_number_compare_name,
+               include_smi_compare_name);
   return name_;
 }
 
@@ -3522,7 +3564,8 @@
          | RegisterField::encode(lhs_.is(r0))
          | StrictField::encode(strict_)
          | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_);
+         | IncludeNumberCompareField::encode(include_number_compare_)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
@@ -4144,17 +4187,21 @@
 
 
   // Check bounds and smi-ness.
-  __ ldr(r7, MemOperand(sp, kToOffset));
-  __ ldr(r6, MemOperand(sp, kFromOffset));
+  Register to = r6;
+  Register from = r7;
+  __ Ldrd(to, from, MemOperand(sp, kToOffset));
+  STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   // I.e., arithmetic shift right by one un-smi-tags.
-  __ mov(r2, Operand(r7, ASR, 1), SetCC);
-  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
-  // If either r2 or r6 had the smi tag bit set, then carry is set now.
+  __ mov(r2, Operand(to, ASR, 1), SetCC);
+  __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
+  // If either to or from had the smi tag bit set, then carry is set now.
   __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
   __ b(mi, &runtime);  // From is negative.
 
+  // Both to and from are smis.
+
   __ sub(r2, r2, Operand(r3), SetCC);
   __ b(mi, &runtime);  // Fail if from > to.
   // Special handling of sub-strings of length 1 and 2. One character strings
@@ -4165,8 +4212,8 @@
 
   // r2: length
   // r3: from index (untagged smi)
-  // r6: from (smi)
-  // r7: to (smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
 
   // Make sure first argument is a sequential (or flat) string.
   __ ldr(r5, MemOperand(sp, kStringOffset));
@@ -4178,10 +4225,10 @@
 
   // r1: instance type
   // r2: length
-  // r3: from index (untaged smi)
+  // r3: from index (untagged smi)
   // r5: string
-  // r6: from (smi)
-  // r7: to (smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
   Label seq_string;
   __ and_(r4, r1, Operand(kStringRepresentationMask));
   STATIC_ASSERT(kSeqStringTag < kConsStringTag);
@@ -4207,17 +4254,18 @@
   // r2: length
   // r3: from index (untagged smi)
   // r5: string
-  // r6: from (smi)
-  // r7: to (smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
   __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
-  __ cmp(r4, Operand(r7));
+  __ cmp(r4, Operand(to));
   __ b(lt, &runtime);  // Fail if to > length.
+  to = no_reg;
 
   // r1: instance type.
   // r2: result string length.
   // r3: from index (untagged smi)
   // r5: string.
-  // r6: from offset (smi)
+  // r7 (a.k.a. from): from offset (smi)
   // Check for flat ascii string.
   Label non_ascii_flat;
   __ tst(r1, Operand(kStringEncodingMask));
@@ -4259,12 +4307,12 @@
   // r0: result string.
   // r2: result string length.
   // r5: string.
-  // r6: from offset (smi)
+  // r7 (a.k.a. from): from offset (smi)
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Locate 'from' character of string.
   __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(r6, ASR, 1));
+  __ add(r5, r5, Operand(from, ASR, 1));
 
   // r0: result string.
   // r1: first character of result string.
@@ -4280,7 +4328,7 @@
   __ bind(&non_ascii_flat);
   // r2: result string length.
   // r5: string.
-  // r6: from offset (smi)
+  // r7 (a.k.a. from): from offset (smi)
   // Check for flat two byte string.
 
   // Allocate the result.
@@ -4292,18 +4340,19 @@
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Locate 'from' character of string.
-    __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // As "from" is a smi it is 2 times the value, which matches the size of a
   // two-byte character.
-  __ add(r5, r5, Operand(r6));
+  __ add(r5, r5, Operand(from));
+  from = no_reg;
 
   // r0: result string.
   // r1: first character of result.
   // r2: result length.
   // r5: first character of string to copy.
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
-                                           DEST_ALWAYS_ALIGNED);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
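
The paired loads introduced above and in the string-compare hunk below
(Ldrd of r6/r7 and of r0/r1) rely on ARM's ldrd constraints: the first
destination register must be even-numbered, the second must be the next
register, and the two words must be adjacent in memory, which the new
STATIC_ASSERT(kFromOffset == kToOffset + 4) pins down. A tiny mirror of
the register constraint (a sketch; MacroAssembler::Ldrd asserts this at
code-generation time):

  // Both pairs used here qualify: r6/r7 and r0/r1.
  bool IsValidLdrdPair(int first_reg_code, int second_reg_code) {
    return (first_reg_code & 1) == 0 &&            // first register is even
           second_reg_code == first_reg_code + 1;  // and they are consecutive
  }
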
@@ -4379,8 +4428,7 @@
   // Stack frame on entry.
   //  sp[0]: right string
   //  sp[4]: left string
-  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
+  __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.
 
   Label not_same;
   __ cmp(r0, r1);
@@ -4395,12 +4443,12 @@
   __ bind(&not_same);
 
   // Check that both objects are sequential ascii strings.
-  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
 
   // Compare flat ascii strings natively. Remove arguments from stack first.
   __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
-  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
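
On the smi fast path added above, the stub compares two tagged smis with
a single subtraction: lhs - rhs already orders correctly unless the
subtraction overflows, in which case the wrapped result has the wrong
sign and is negated (rsb r0, r0, #0) to correct it. The same trick in
portable C++, with smi tagging elided and the stub's V-flag test modeled
by a wide subtraction (a sketch, not the stub itself):

  #include <cassert>
  #include <cstdint>

  int32_t CompareSmis(int32_t lhs, int32_t rhs) {
    int64_t wide = static_cast<int64_t>(lhs) - rhs;
    int32_t wrapped = static_cast<int32_t>(
        static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs));
    if (wide != wrapped) {  // overflow: what the V flag signals in the stub
      wrapped = -wrapped;   // correct the sign, like rsb r0, r0, #0
    }
    return wrapped;         // <0, 0 or >0: the stub's return contract
  }

  int main() {
    assert(CompareSmis(2, 4) < 0);
    assert(CompareSmis(4, 4) == 0);
    // Overflowing case: a positive minus a large negative wraps negative;
    // the correction restores the positive sign.
    assert(CompareSmis(6, INT32_MIN + 4) > 0);
    return 0;
  }
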
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f985fb4..d273e75 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -246,7 +246,7 @@
       frame_->AssertIsSpilled();
       for (int i = 0; i < scope()->num_parameters(); i++) {
         Variable* par = scope()->parameter(i);
-        Slot* slot = par->slot();
+        Slot* slot = par->AsSlot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           ASSERT(!scope()->is_global_scope());  // No params in global scope.
           __ ldr(r1, frame_->ParameterAt(i));
@@ -270,7 +270,7 @@
     // Initialize ThisFunction reference if present.
     if (scope()->is_function_scope() && scope()->function() != NULL) {
       frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
-      StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
     }
 
     // Initialize the function return target after the locals are set
@@ -608,24 +608,24 @@
     frame_->EmitPush(r0);
   }
 
-  Variable* arguments = scope()->arguments()->var();
-  Variable* shadow = scope()->arguments_shadow()->var();
-  ASSERT(arguments != NULL && arguments->slot() != NULL);
-  ASSERT(shadow != NULL && shadow->slot() != NULL);
+  Variable* arguments = scope()->arguments();
+  Variable* shadow = scope()->arguments_shadow();
+  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
   JumpTarget done;
   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
     // We have to skip storing into the arguments slot if it has
     // already been written to. This can happen if a function
     // has a local variable named 'arguments'.
-    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
     Register arguments = frame_->PopToRegister();
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(arguments, ip);
     done.Branch(ne);
   }
-  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
   if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
 }
 
 
@@ -641,10 +641,10 @@
     Property property(&global, &key, RelocInfo::kNoPosition);
     Reference ref(this, &property);
     ref.GetValue();
-  } else if (variable != NULL && variable->slot() != NULL) {
+  } else if (variable != NULL && variable->AsSlot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -695,7 +695,7 @@
       LoadGlobal();
       ref->set_type(Reference::NAMED);
     } else {
-      ASSERT(var->slot() != NULL);
+      ASSERT(var->AsSlot() != NULL);
       ref->set_type(Reference::SLOT);
     }
   } else {
@@ -1651,7 +1651,7 @@
     // Perform non-smi comparison by stub.
     // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
     // We call with 0 args because there are 0 on the stack.
-    CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
+    CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
     frame_->CallStub(&stub, 0);
     __ cmp(r0, Operand(0, RelocInfo::NONE));
     exit.Jump();
@@ -1718,7 +1718,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
 
   // At this point the top two stack elements are probably in registers
   // since they were just loaded.  Ensure they are in regs and get the
@@ -1950,7 +1950,7 @@
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
 
   // If it was not possible to allocate the variable at compile time,
   // we need to "declare" it at runtime to make sure it actually
@@ -2480,8 +2480,8 @@
   // the bottom check of the loop condition.
   TypeInfoCodeGenState type_info_scope(this,
                                        node->is_fast_smi_loop() ?
-                                           node->loop_variable()->slot() :
-                                           NULL,
+                                       node->loop_variable()->AsSlot() :
+                                       NULL,
                                        TypeInfo::Smi());
 
   // If there is no update statement, label the top of the loop with the
@@ -2794,8 +2794,8 @@
 
   // Store the caught exception in the catch variable.
   Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
-  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
 
   // Remove the exception from the stack.
   frame_->Drop();
@@ -3420,7 +3420,7 @@
 
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
     frame_->SpillAll();
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -3449,7 +3449,7 @@
           // variables. Then load the argument from the arguments
           // object using keyed load.
           __ ldr(r0,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                    r1,
                                                    r2,
                                                    slow));
@@ -3735,7 +3735,7 @@
   Comment cmnt(masm(), "[ Variable Assignment");
   Variable* var = node->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   ASSERT(slot != NULL);
 
   // Evaluate the right-hand side.
@@ -4136,14 +4136,14 @@
     // in generated code. If we succeed, there is no need to perform a
     // context lookup in the runtime system.
     JumpTarget done;
-    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->slot()->type() == Slot::LOOKUP);
+    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
       JumpTarget slow;
       // Prepare the stack for the call to
       // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
       // function, the first argument to the eval call and the
       // receiver.
-      LoadFromGlobalSlotCheckExtensions(var->slot(),
+      LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
                                         NOT_INSIDE_TYPEOF,
                                         &slow);
       frame_->EmitPush(r0);
@@ -4225,8 +4225,8 @@
     __ ldr(cp, frame_->Context());
     frame_->EmitPush(r0);
 
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // ----------------------------------
     // JavaScript examples:
     //
@@ -4244,7 +4244,7 @@
     // Generate fast case for loading functions from slots that
     // correspond to local/global variables or arguments unless they
     // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &slow,
                                     &done);
@@ -5928,7 +5928,7 @@
       frame_->EmitPush(r0);
 
     } else if (variable != NULL) {
-      Slot* slot = variable->slot();
+      Slot* slot = variable->AsSlot();
       if (variable->is_global()) {
         LoadGlobal();
         frame_->EmitPush(Operand(variable->name()));
@@ -5985,6 +5985,7 @@
         GenericUnaryOpStub stub(
             Token::SUB,
             overwrite,
+            NO_UNARY_FLAGS,
             no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
         frame_->CallStub(&stub, 0);
         frame_->EmitPush(r0);  // r0 has result
@@ -6009,7 +6010,9 @@
         not_smi_label.Bind();
         frame_->SpillAll();
         __ Move(r0, tos);
-        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+        GenericUnaryOpStub stub(Token::BIT_NOT,
+                                overwrite,
+                                NO_UNARY_SMI_CODE_IN_STUB);
         frame_->CallStub(&stub, 0);
         frame_->EmitPush(r0);
 
@@ -6059,7 +6062,7 @@
   bool is_const = (var != NULL && var->mode() == Variable::CONST);
   bool is_slot = (var != NULL && var->mode() == Variable::VAR);
 
-  if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
+  if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
     // The type info declares that this variable is always a Smi.  That
     // means it is a Smi both before and after the increment/decrement.
     // Lets make use of that to make a very minimal count.
@@ -7204,7 +7207,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       ASSERT(slot != NULL);
       DupIfPersist();
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
@@ -7248,7 +7251,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       cgen_->StoreToSlot(slot, init_state);
       set_unloaded();
       break;
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 162d97f..1483c0b 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -271,10 +271,6 @@
 
   void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
 
-  // If the name is an inline runtime function call return the number of
-  // expected arguments. Otherwise return -1.
-  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     return FLAG_debug_code ? 32 : 13;
@@ -290,6 +286,12 @@
   }
 
  private:
+  // Type of a member function that generates inline code for a native function.
+  typedef void (CodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
@@ -305,9 +307,9 @@
   int NumberOfSlot(Slot* slot);
 
   // State
-  bool has_cc() const  { return cc_reg_ != al; }
-  JumpTarget* true_target() const  { return state_->true_target(); }
-  JumpTarget* false_target() const  { return state_->false_target(); }
+  bool has_cc() const { return cc_reg_ != al; }
+  JumpTarget* true_target() const { return state_->true_target(); }
+  JumpTarget* false_target() const { return state_->false_target(); }
 
   // Track loop nesting level.
   int loop_nesting() const { return loop_nesting_; }
@@ -447,13 +449,9 @@
   void Branch(bool if_true, JumpTarget* target);
   void CheckStack();
 
-  struct InlineRuntimeLUT {
-    void (CodeGenerator::*method)(ZoneList<Expression*>*);
-    const char* name;
-    int nargs;
-  };
+  static InlineFunctionGenerator FindInlineFunctionGenerator(
+      Runtime::FunctionId function_id);
 
-  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
 
   static Handle<Code> ComputeLazyCompile(int argc);
@@ -599,8 +597,6 @@
   // Size of inlined write barriers generated by EmitNamedStore.
   static int inlined_write_barrier_size_;
 
-  static InlineRuntimeLUT kInlineRuntimeLUT[];
-
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
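
The codegen-arm.h change replaces the old name-keyed InlineRuntimeLUT,
which was searched by function name string, with a table of member
function pointers indexed directly by Runtime::FunctionId. A toy sketch
of that dispatch shape (the ids and generators below are invented for
illustration):

  class CodeGenerator {
   public:
    enum FunctionId { kIsSmi = 0, kCharCodeAt, kNumIds };
    typedef void (CodeGenerator::*InlineFunctionGenerator)(int num_args);

    void GenerateInline(FunctionId id, int num_args) {
      InlineFunctionGenerator generator = kInlineFunctionGenerators[id];
      (this->*generator)(num_args);  // array index instead of string search
    }

   private:
    void GenerateIsSmi(int num_args) { (void)num_args; }
    void GenerateCharCodeAt(int num_args) { (void)num_args; }
    static const InlineFunctionGenerator kInlineFunctionGenerators[kNumIds];
  };

  const CodeGenerator::InlineFunctionGenerator
      CodeGenerator::kInlineFunctionGenerators[CodeGenerator::kNumIds] = {
    &CodeGenerator::GenerateIsSmi,
    &CodeGenerator::GenerateCharCodeAt,
  };

  int main() {
    CodeGenerator cgen;
    cgen.GenerateInline(CodeGenerator::kIsSmi, 1);
    return 0;
  }
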
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 4743439..b0c0990 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -37,17 +37,8 @@
 namespace internal {
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
-  Address sp = fp + ExitFrameConstants::kSPOffset;
-
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return fp + ExitFrameConstants::kSPOffset;
 }
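
After this change the ARM file keeps only the platform-specific offset
computation; the removed logic presumably moves behind a shared
ExitFrame::GetStateForFramePointer in platform-independent code.
Reconstructed shape of that shared caller (only ComputeStackPointer is
quoted from this patch; the body mirrors the lines deleted above):

  StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
    if (fp == 0) return NONE;
    Address sp = ComputeStackPointer(fp);  // the per-platform hook
    state->sp = sp;
    state->fp = fp;
    state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
    ASSERT(*state->pc_address != NULL);
    return EXIT;
  }
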
 
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0f8f6d4..9fc0c09 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -100,7 +100,7 @@
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->slot();
+      Slot* slot = scope()->parameter(i)->AsSlot();
       if (slot != NULL && slot->type() == Slot::CONTEXT) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
@@ -118,7 +118,7 @@
     }
   }
 
-  Variable* arguments = scope()->arguments()->AsVariable();
+  Variable* arguments = scope()->arguments();
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
@@ -143,9 +143,8 @@
     __ CallStub(&stub);
     // Duplicate the value; move-to-slot operation might clobber registers.
     __ mov(r3, r0);
-    Move(arguments->slot(), r0, r1, r2);
-    Slot* dot_arguments_slot =
-        scope()->arguments_shadow()->AsVariable()->slot();
+    Move(arguments->AsSlot(), r0, r1, r2);
+    Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
     Move(dot_arguments_slot, r3, r1, r2);
   }
 
@@ -253,205 +252,202 @@
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
 
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
 
-    case Expression::kValue:
-      // Move value into place.
-      switch (location_) {
-        case kAccumulator:
-          if (!reg.is(result_register())) __ mov(result_register(), reg);
-          break;
-        case kStack:
-          __ push(reg);
-          break;
-      }
-      break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+  codegen()->Move(result_register(), slot);
+}
 
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      if (!reg.is(result_register())) __ mov(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+  codegen()->Move(result_register(), slot);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+  // For simplicity we always test the accumulator register.
+  codegen()->Move(result_register(), slot);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    __ b(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    __ b(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
   }
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-    case Expression::kValue:
-    case Expression::kTest:
-      // On ARM we have to move the value into a register to do anything
-      // with it.
-      Move(result_register(), slot);
-      Apply(context, result_register());
-      break;
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates can be pushed directly.
+  __ mov(result_register(), Operand(lit));
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    __ b(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    __ b(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      __ b(false_label_);
+    } else {
+      __ b(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      __ b(false_label_);
+    } else {
+      __ b(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ mov(result_register(), Operand(lit));
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
   }
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      break;
-      // Nothing to do.
-    case Expression::kValue:
-    case Expression::kTest:
-      // On ARM we have to move the value into a register to do anything
-      // with it.
-      __ mov(result_register(), Operand(lit->handle()));
-      Apply(context, result_register());
-      break;
-  }
-}
-
-
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(1);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ pop(result_register());
-          break;
-        case kStack:
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      __ pop(result_register());
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::DropAndApply(int count,
-                                     Expression::Context context,
-                                     Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
   ASSERT(count > 0);
-  ASSERT(!reg.is(sp));
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(count);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ Drop(count);
-          if (!reg.is(result_register())) __ mov(result_register(), reg);
-          break;
-        case kStack:
-          if (count > 1) __ Drop(count - 1);
-          __ str(reg, MemOperand(sp));
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      __ Drop(count);
-      if (!reg.is(result_register())) __ mov(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
+  __ Drop(count);
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context,
-                              Label* materialize_true,
-                              Label* materialize_false) {
-  switch (context) {
-    case Expression::kUninitialized:
-
-    case Expression::kEffect:
-      ASSERT_EQ(materialize_true, materialize_false);
-      __ bind(materialize_true);
-      break;
-
-    case Expression::kValue: {
-      Label done;
-      switch (location_) {
-        case kAccumulator:
-          __ bind(materialize_true);
-          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
-          break;
-        case kStack:
-          __ bind(materialize_true);
-          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-          __ push(ip);
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-          __ push(ip);
-          break;
-      }
-      __ bind(&done);
-      break;
-    }
-
-    case Expression::kTest:
-      break;
-  }
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      break;
-    case Expression::kValue: {
-      Heap::RootListIndex value_root_index =
-          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
-      switch (location_) {
-        case kAccumulator:
-          __ LoadRoot(result_register(), value_root_index);
-          break;
-        case kStack:
-          __ LoadRoot(ip, value_root_index);
-          __ push(ip);
-          break;
-      }
-      break;
-    }
-    case Expression::kTest:
-      if (flag) {
-        if (true_label_ != fall_through_) __ b(true_label_);
-      } else {
-        if (false_label_ != fall_through_) __ b(false_label_);
-      }
-      break;
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ str(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  ASSERT_EQ(materialize_true, materialize_false);
+  __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ push(ip);
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ push(ip);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  ASSERT(materialize_false == false_label_);
+  ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(ip, value_root_index);
+  __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  if (flag) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ b(false_label_);
   }
 }
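
The rewrite above replaces switch statements over Expression::Context
with per-destination context objects: EffectContext,
AccumulatorValueContext, StackValueContext and TestContext each override
Plug()/DropAndPlug() for exactly one way of consuming a value, so every
visitor just calls context()->Plug(...) and the dispatch happens once,
virtually. A condensed, self-contained sketch of the pattern with toy
emitters (the real classes are nested in FullCodeGenerator):

  #include <cstdio>

  class ExpressionContext {
   public:
    virtual ~ExpressionContext() {}
    virtual void Plug(bool flag) const = 0;  // one overload per value kind
  };

  class EffectContext : public ExpressionContext {
   public:
    virtual void Plug(bool flag) const { (void)flag; }  // value discarded
  };

  class AccumulatorValueContext : public ExpressionContext {
   public:
    virtual void Plug(bool flag) const {
      std::printf("LoadRoot r0, %s\n", flag ? "true_value" : "false_value");
    }
  };

  class TestContext : public ExpressionContext {
   public:
    virtual void Plug(bool flag) const {
      std::printf("b %s\n", flag ? "true_label" : "false_label");
    }
  };

  static void VisitBooleanLiteral(const ExpressionContext* context, bool value) {
    context->Plug(value);  // no switch over the context kind
  }

  int main() {
    AccumulatorValueContext acc;
    TestContext test;
    VisitBooleanLiteral(&acc, true);   // materializes the value
    VisitBooleanLiteral(&test, true);  // emits control flow instead
    return 0;
  }
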
 
@@ -533,7 +529,7 @@
                                         FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->slot();
+  Slot* slot = variable->AsSlot();
   Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
@@ -544,7 +540,7 @@
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, MemOperand(fp, SlotOffset(slot)));
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         }
         break;
@@ -566,7 +562,7 @@
           __ str(ip, ContextOperand(cp, slot->index()));
           // No write barrier since the_hole_value is in old space.
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ str(result_register(), ContextOperand(cp, slot->index()));
           int offset = Context::SlotOffset(slot->index());
           // We know that we have written a function, which is not a smi.
@@ -593,7 +589,7 @@
         } else if (function != NULL) {
           __ Push(cp, r2, r1);
           // Push initial value for function declaration.
-          VisitForValue(function, kStack);
+          VisitForStackValue(function);
         } else {
           __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
           __ Push(cp, r2, r1, r0);
@@ -607,20 +603,20 @@
     if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       if (function != NULL) {
-        VisitForValue(prop->key(), kStack);
-        VisitForValue(function, kAccumulator);
+        VisitForStackValue(prop->key());
+        VisitForAccumulatorValue(function);
         __ pop(r1);  // Key.
       } else {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         __ mov(r1, result_register());  // Key.
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
       __ pop(r2);  // Receiver.
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       // Value in r0 is ignored (declarations are statements).
     }
   }
@@ -648,7 +644,7 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
   // Keep the switch value on the stack until a case matches.
-  VisitForValue(stmt->tag(), kStack);
+  VisitForStackValue(stmt->tag());
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -668,11 +664,12 @@
     next_test.Unuse();
 
     // Compile the label expression.
-    VisitForValue(clause->label(), kAccumulator);
+    VisitForAccumulatorValue(clause->label());
 
     // Perform the comparison as if via '==='.
     __ ldr(r1, MemOperand(sp, 0));  // Switch value.
-    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    if (inline_smi_code) {
       Label slow_case;
       __ orr(r2, r1, r0);
       __ tst(r2, Operand(kSmiTagMask));
@@ -684,7 +681,10 @@
     __ bind(&slow_case);
     }
 
-    CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
+    CompareFlags flags = inline_smi_code
+        ? NO_SMI_COMPARE_IN_STUB
+        : NO_COMPARE_FLAGS;
+    CompareStub stub(eq, true, flags, r1, r0);
     __ CallStub(&stub);
     __ cmp(r0, Operand(0, RelocInfo::NONE));
     __ b(ne, &next_test);
@@ -725,7 +725,7 @@
   // Get the object to enumerate over. Both SpiderMonkey and JSC
   // ignore null and undefined in contrast to the specification; see
   // ECMA-262 section 12.6.4.
-  VisitForValue(stmt->enumerable(), kAccumulator);
+  VisitForAccumulatorValue(stmt->enumerable());
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r0, ip);
   __ b(eq, &exit);
@@ -868,13 +868,13 @@
     __ Push(cp, r0);
     __ CallRuntime(Runtime::kNewClosure, 2);
   }
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var(), context_);
+  EmitVariableLoad(expr->var());
 }
 
 
@@ -923,7 +923,7 @@
     EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
     __ jmp(done);
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -948,11 +948,11 @@
           // variables. Then load the argument from the arguments
           // object using keyed load.
           __ ldr(r1,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                    slow));
           __ mov(r0, Operand(key_literal->handle()));
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ Call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1018,16 +1018,15 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
-                                         Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
   // Four cases: non-this global variables, lookup slots, all other
   // types of slots, and parameters that rewrite to explicit property
   // accesses on the arguments object.
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   Property* property = var->AsProperty();
 
   if (var->is_global() && !var->is_this()) {
@@ -1037,8 +1036,8 @@
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    Apply(context, r0);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    context()->Plug(r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Label done, slow;
@@ -1054,24 +1053,24 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ bind(&done);
 
-    Apply(context, r0);
+    context()->Plug(r0);
 
   } else if (slot != NULL) {
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
     if (var->mode() == Variable::CONST) {
-       // Constants may be the hole value if they have not been initialized.
-       // Unhole them.
-       MemOperand slot_operand = EmitSlotSearch(slot, r0);
-       __ ldr(r0, slot_operand);
-       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-       __ cmp(r0, ip);
-       __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-       Apply(context, r0);
-     } else {
-       Apply(context, slot);
-     }
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      MemOperand slot_operand = EmitSlotSearch(slot, r0);
+      __ ldr(r0, slot_operand);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      context()->Plug(r0);
+    } else {
+      context()->Plug(slot);
+    }
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
     ASSERT_NOT_NULL(property);
@@ -1080,7 +1079,7 @@
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
     ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->slot();
+    Slot* object_slot = object_var->AsSlot();
     ASSERT_NOT_NULL(object_slot);
 
     // Load the object.
@@ -1096,8 +1095,8 @@
 
     // Call keyed load IC. It has arguments key and receiver in r0 and r1.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    Apply(context, r0);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    context()->Plug(r0);
   }
 }
 
@@ -1141,7 +1140,7 @@
   // r2: temp.
   __ pop(r1);
   __ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -1181,11 +1180,11 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForValue(value, kAccumulator);
+          VisitForAccumulatorValue(value);
           __ mov(r2, Operand(key->handle()));
           __ ldr(r1, MemOperand(sp));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ Call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
@@ -1193,8 +1192,8 @@
         // Duplicate receiver on stack.
         __ ldr(r0, MemOperand(sp));
         __ push(r0);
-        VisitForValue(key, kStack);
-        VisitForValue(value, kStack);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kSetProperty, 3);
         break;
       case ObjectLiteral::Property::GETTER:
@@ -1202,21 +1201,21 @@
         // Duplicate receiver on stack.
         __ ldr(r0, MemOperand(sp));
         __ push(r0);
-        VisitForValue(key, kStack);
+        VisitForStackValue(key);
         __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
                            Smi::FromInt(1) :
                            Smi::FromInt(0)));
         __ push(r1);
-        VisitForValue(value, kStack);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
     }
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, r0);
+    context()->Plug(r0);
   }
 }
 
@@ -1264,7 +1263,7 @@
       __ push(r0);
       result_saved = true;
     }
-    VisitForValue(subexpr, kAccumulator);
+    VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
     __ ldr(r1, MemOperand(sp));  // Copy of array literal.
@@ -1278,9 +1277,9 @@
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, r0);
+    context()->Plug(r0);
   }
 }
 
@@ -1313,39 +1312,38 @@
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
         // We need the receiver both on the stack and in the accumulator.
-        VisitForValue(property->obj(), kAccumulator);
+        VisitForAccumulatorValue(property->obj());
         __ push(result_register());
       } else {
-        VisitForValue(property->obj(), kStack);
+        VisitForStackValue(property->obj());
       }
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kAccumulator);
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ ldr(r1, MemOperand(sp, 0));
         __ push(r0);
       } else {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kStack);
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
   }
 
   if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    switch (assign_type) {
-      case VARIABLE:
-        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
-                         Expression::kValue);
-        break;
-      case NAMED_PROPERTY:
-        EmitNamedPropertyLoad(property);
-        break;
-      case KEYED_PROPERTY:
-        EmitKeyedPropertyLoad(property);
-        break;
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          break;
+      }
     }
 
     Token::Value op = expr->binary_op();
@@ -1355,28 +1353,26 @@
     ASSERT(constant == kRightConstant || constant == kNoConstants);
     if (constant == kNoConstants) {
       __ push(r0);  // Left operand goes on the stack.
-      VisitForValue(expr->value(), kAccumulator);
+      VisitForAccumulatorValue(expr->value());
     }
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
         ? OVERWRITE_RIGHT
         : NO_OVERWRITE;
     SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
       EmitInlineSmiBinaryOp(expr,
                             op,
-                            Expression::kValue,
                             mode,
                             expr->target(),
                             expr->value(),
                             constant);
     } else {
-      EmitBinaryOp(op, Expression::kValue, mode);
+      EmitBinaryOp(op, mode);
     }
-    location_ = saved_location;
-
   } else {
-    VisitForValue(expr->value(), kAccumulator);
+    VisitForAccumulatorValue(expr->value());
   }
 
   // Record source position before possible IC call.
@@ -1386,8 +1382,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(),
-                             context_);
+                             expr->op());
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1405,7 +1400,7 @@
   __ mov(r2, Operand(key->handle()));
-  // Call load IC. It has arguments receiver and property name r0 and r2.
+  // Call load IC. It has arguments receiver and property name in r0 and r2.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1413,29 +1408,27 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                               Token::Value op,
-                                              Expression::Context context,
                                               OverwriteMode mode,
                                               Expression* left,
                                               Expression* right,
                                               ConstantOperand constant) {
   ASSERT(constant == kNoConstants);  // Only handled case.
-  EmitBinaryOp(op, context, mode);
+  EmitBinaryOp(op, mode);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
-                                     Expression::Context context,
                                      OverwriteMode mode) {
   __ pop(r1);
   GenericBinaryOpStub stub(op, mode, r1, r0);
   __ CallStub(&stub);
-  Apply(context, r0);
+  context()->Plug(r0);
 }
 
 
@@ -1461,28 +1454,29 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* var = expr->AsVariableProxy()->var();
-      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
       break;
     }
     case NAMED_PROPERTY: {
       __ push(r0);  // Preserve value.
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ mov(r1, r0);
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(r0);  // Preserve value.
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ mov(r1, r0);
       __ pop(r2);
       __ pop(r0);  // Restore value.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1490,12 +1484,11 @@
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
-                                               Token::Value op,
-                                               Expression::Context context) {
+                                               Token::Value op) {
   // Left-hand sides that rewrite to explicit property accesses do not reach
   // here.
   ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->slot() != NULL);
+  ASSERT(var->is_global() || var->AsSlot() != NULL);
 
   if (var->is_global()) {
     ASSERT(!var->is_this());
@@ -1505,13 +1498,13 @@
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
     // of const variables.  Const assignments are simply skipped.
     Label done;
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
@@ -1561,7 +1554,7 @@
     __ bind(&done);
   }
 
-  Apply(context, result_register());
+  context()->Plug(result_register());
 }
 
 
@@ -1594,7 +1587,7 @@
   }
 
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1604,9 +1597,9 @@
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
-    DropAndApply(1, context_, r0);
+    context()->DropAndPlug(1, r0);
   } else {
-    Apply(context_, r0);
+    context()->Plug(r0);
   }
 }
 
@@ -1638,7 +1631,7 @@
   }
 
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1648,9 +1641,9 @@
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
-    DropAndApply(1, context_, r0);
+    context()->DropAndPlug(1, r0);
   } else {
-    Apply(context_, r0);
+    context()->Plug(r0);
   }
 }
 
@@ -1660,16 +1653,15 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForValue(expr->obj(), kAccumulator);
+    VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
-    Apply(context_, r0);
   } else {
-    VisitForValue(expr->obj(), kStack);
-    VisitForValue(expr->key(), kAccumulator);
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
     __ pop(r1);
     EmitKeyedPropertyLoad(expr);
-    Apply(context_, r0);
   }
+  context()->Plug(r0);
 }
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -1679,7 +1671,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   __ mov(r2, Operand(name));
   // Record source position for debugger.
@@ -1687,10 +1679,10 @@
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -1701,9 +1693,9 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
-  VisitForValue(key, kAccumulator);
+  VisitForAccumulatorValue(key);
   __ mov(r2, r0);
   // Record source position for debugger.
   SetSourcePosition(expr->position());
@@ -1711,10 +1703,10 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
                                                               in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -1723,7 +1715,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
@@ -1732,7 +1724,7 @@
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  DropAndApply(1, context_, r0);
+  context()->DropAndPlug(1, r0);
 }
 
 
@@ -1746,7 +1738,7 @@
     // resolve the function we need to call and the receiver of the
     // call.  Then we call the resolved function using the given
     // arguments.
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
     __ push(r2);  // Reserved receiver slot.
 
@@ -1754,7 +1746,7 @@
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
-      VisitForValue(args->at(i), kStack);
+      VisitForStackValue(args->at(i));
     }
 
     // Push copy of the function - found below the arguments.
@@ -1786,26 +1778,26 @@
     __ CallStub(&stub);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    DropAndApply(1, context_, r0);
+    context()->DropAndPlug(1, r0);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ push(r0);
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &slow,
                                     &done);
 
     __ bind(&slow);
-    // Call the runtime to find the function to call (returned in eax)
-    // and the object holding it (returned in edx).
+    // Call the runtime to find the function to call (returned in r0)
+    // and the object holding it (returned in r1).
     __ push(context_register());
     __ mov(r2, Operand(var->name()));
@@ -1836,21 +1828,21 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
       // for a regular property use keyed CallIC.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       if (prop->is_synthetic()) {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         // Record source code position for IC call.
         SetSourcePosition(prop->position());
         __ pop(r1);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ Call(ic, RelocInfo::CODE_TARGET);
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         __ ldr(r1, CodeGenerator::GlobalObject());
         __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
         __ Push(r0, r1);  // Function, receiver.
@@ -1869,7 +1861,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     // Load global receiver object.
     __ ldr(r1, CodeGenerator::GlobalObject());
     __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1889,13 +1881,13 @@
   // Push constructor on the stack.  If it's not a function it's used as
   // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
   // ignored.
-  VisitForValue(expr->expression(), kStack);
+  VisitForStackValue(expr->expression());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   // Call the construct call builtin that handles allocation and
@@ -1908,59 +1900,61 @@
 
   Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
   __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
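+  // In a test context these labels are the caller's branch targets; in
+  // other contexts they point at code materializing true and false values.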
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_true);
   __ b(if_false);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ LoadRoot(ip, Heap::kNullValueRootIndex);
@@ -1977,41 +1969,41 @@
   __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
   Split(le, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   Split(ge, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -2019,7 +2011,7 @@
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
   Split(ne, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2028,80 +2020,80 @@
 
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
   // used in a few functions in runtime.js which should not normally be hit by
   // this compiler.
   __ jmp(if_false);
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2113,8 +2105,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Get the frame pointer for the calling frame.
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2132,7 +2124,7 @@
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2140,21 +2132,21 @@
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ pop(r1);
   __ cmp(r0, r1);
   Split(eq, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2162,13 +2154,13 @@
   ASSERT(args->length() == 1);
 
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  VisitForValue(args->at(0), kAccumulator);
+  // ArgumentsAccessStub expects the key in r1 and the formal
+  // parameter count in r0.
+  VisitForAccumulatorValue(args->at(0));
   __ mov(r1, r0);
   __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2190,7 +2182,7 @@
   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
 
   __ bind(&exit);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2198,7 +2190,7 @@
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
   __ BranchOnSmi(r0, &null);
@@ -2244,7 +2236,7 @@
   // All done.
   __ bind(&done);
 
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2259,14 +2251,14 @@
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
-    VisitForValue(args->at(1), kStack);
-    VisitForValue(args->at(2), kStack);
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
   // Finally, we're expected to leave a value on the top of the stack.
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2316,7 +2308,7 @@
         ExternalReference::fill_heap_number_with_random_function(), 1);
   }
 
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2324,11 +2316,11 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2336,19 +2328,19 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
   ASSERT(args->length() == 4);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
-  VisitForValue(args->at(3), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
   // If the object is a smi return the object.
@@ -2359,25 +2351,25 @@
   __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
 
   __ bind(&done);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
   ASSERT(args->length() == 2);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
   __ CallRuntime(Runtime::kMath_pow, 2);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);  // Load the object.
-  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
 
   Label done;
@@ -2395,7 +2387,7 @@
   __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
 
   __ bind(&done);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2403,18 +2395,18 @@
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label done;
   StringCharFromCodeGenerator generator(r0, r1);
@@ -2425,15 +2417,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, r1);
+  context()->Plug(r1);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
@@ -2472,15 +2464,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
@@ -2521,58 +2513,58 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime.
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sin, 1);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime.
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_cos, 1);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2580,38 +2572,38 @@
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForValue(args->at(0), kStack);  // Receiver.
+  VisitForStackValue(args->at(0));  // Receiver.
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i + 1), kStack);
+    VisitForStackValue(args->at(i + 1));
   }
-  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
 
   // InvokeFunction requires function in r1. Move it in there.
   if (!result_register().is(r1)) __ mov(r1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(r1, count, CALL_FUNCTION);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kSwapElements, 3);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2626,11 +2618,11 @@
   if (jsfunction_result_caches->length() <= cache_id) {
     __ Abort("Attempt to use undefined cache.");
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    Apply(context_, r0);
+    context()->Plug(r0);
     return;
   }
 
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForAccumulatorValue(args->at(1));
 
   Register key = r0;
   Register cache = r1;
@@ -2662,7 +2654,7 @@
   __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2674,8 +2666,8 @@
   Register tmp = r2;
   Register tmp2 = r3;
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
   __ pop(left);
 
   Label done, fail, ok;
@@ -2703,19 +2695,19 @@
   __ LoadRoot(r0, Heap::kTrueValueRootIndex);
   __ bind(&done);
 
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
@@ -2723,16 +2715,16 @@
   __ b(eq, if_true);
   __ b(if_false);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ IndexFromHash(r0, r0);
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2757,7 +2749,7 @@
   // Push the arguments ("left-to-right").
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   if (expr->is_jsruntime()) {
@@ -2765,14 +2757,14 @@
     __ mov(r2, Operand(expr->name()));
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                            NOT_IN_LOOP);
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
@@ -2786,20 +2778,20 @@
         // Result of deleting non-property, non-variable reference is true.
         // The subexpression may have side effects.
         VisitForEffect(expr->expression());
-        Apply(context_, true);
+        context()->Plug(true);
       } else if (var != NULL &&
                  !var->is_global() &&
-                 var->slot() != NULL &&
-                 var->slot()->type() != Slot::LOOKUP) {
+                 var->AsSlot() != NULL &&
+                 var->AsSlot()->type() != Slot::LOOKUP) {
         // Result of deleting non-global, non-dynamic variables is false.
         // The subexpression does not have side effects.
-        Apply(context_, false);
+        context()->Plug(false);
       } else {
         // Property or variable reference.  Call the delete builtin with
         // object and property name as arguments.
         if (prop != NULL) {
-          VisitForValue(prop->obj(), kStack);
-          VisitForValue(prop->key(), kStack);
+          VisitForStackValue(prop->obj());
+          VisitForStackValue(prop->key());
         } else if (var->is_global()) {
           __ ldr(r1, CodeGenerator::GlobalObject());
           __ mov(r0, Operand(var->name()));
@@ -2816,7 +2808,7 @@
           __ push(r2);
         }
         __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
-        Apply(context_, r0);
+        context()->Plug(r0);
       }
       break;
     }
@@ -2824,26 +2816,8 @@
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          break;
-        case Expression::kValue:
-          __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
-          switch (location_) {
-            case kAccumulator:
-              break;
-            case kStack:
-              __ push(result_register());
-              break;
-          }
-          break;
-        case Expression::kTest:
-          __ jmp(false_label_);
-          break;
-      }
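+      // The expression context materializes or discards undefined as needed.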
+      context()->Plug(Heap::kUndefinedValueRootIndex);
       break;
     }
 
@@ -2855,31 +2828,33 @@
       Label* fall_through = NULL;
 
       // Notice that the labels are swapped.
-      PrepareTest(&materialize_true, &materialize_false,
-                  &if_false, &if_true, &fall_through);
+      context()->PrepareTest(&materialize_true, &materialize_false,
+                             &if_false, &if_true, &fall_through);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
-      Apply(context_, if_false, if_true);  // Labels swapped.
+      context()->Plug(if_false, if_true);  // Labels swapped.
       break;
     }
 
     case Token::TYPEOF: {
       Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
-      VisitForTypeofValue(expr->expression(), kStack);
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
       __ CallRuntime(Runtime::kTypeof, 1);
-      Apply(context_, r0);
+      context()->Plug(r0);
       break;
     }
 
     case Token::ADD: {
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       Label no_conversion;
       __ tst(result_register(), Operand(kSmiTagMask));
       __ b(eq, &no_conversion);
       __ push(r0);
       __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
       __ bind(&no_conversion);
-      Apply(context_, result_register());
+      context()->Plug(result_register());
       break;
     }
 
@@ -2888,12 +2863,14 @@
       bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
       UnaryOverwriteMode overwrite =
           can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite);
+      GenericUnaryOpStub stub(Token::SUB,
+                              overwrite,
+                              NO_UNARY_FLAGS);
       // GenericUnaryOpStub expects the argument to be in the
       // accumulator register r0.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       __ CallStub(&stub);
-      Apply(context_, r0);
+      context()->Plug(r0);
       break;
     }
 
@@ -2901,9 +2878,10 @@
       Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
       // The generic unary operation stub expects the argument to be
       // in the accumulator register r0.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       Label done;
-      if (ShouldInlineSmiCase(expr->op())) {
+      bool inline_smi_code = ShouldInlineSmiCase(expr->op());
+      if (inline_smi_code) {
         Label call_stub;
         __ BranchOnNotSmi(r0, &call_stub);
         __ mvn(r0, Operand(r0));
@@ -2913,12 +2891,16 @@
         __ bind(&call_stub);
       }
       bool overwrite = expr->expression()->ResultOverwriteAllowed();
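+      // Skip the stub's smi code when the smi case was inlined above.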
+      UnaryOpFlags flags = inline_smi_code
+          ? NO_UNARY_SMI_CODE_IN_STUB
+          : NO_UNARY_FLAGS;
       UnaryOverwriteMode mode =
           overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode);
+      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
       __ CallStub(&stub);
       __ bind(&done);
-      Apply(context_, r0);
+      context()->Plug(r0);
       break;
     }
 
@@ -2954,25 +2935,22 @@
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
-                     Expression::kValue);
-    location_ = saved_location;
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
   } else {
     // Reserve space for result of postfix operation.
-    if (expr->is_postfix() && context_ != Expression::kEffect) {
+    if (expr->is_postfix() && !context()->IsEffect()) {
       __ mov(ip, Operand(Smi::FromInt(0)));
       __ push(ip);
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
       EmitKeyedPropertyLoad(prop);
@@ -2988,29 +2966,21 @@
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        // Do not save result.
-        break;
-      case Expression::kValue:
-      case Expression::kTest:
-        // Save the result on the stack. If we have a named or keyed property
-        // we store the result under the receiver that is currently on top
-        // of the stack.
-        switch (assign_type) {
-          case VARIABLE:
-            __ push(r0);
-            break;
-          case NAMED_PROPERTY:
-            __ str(r0, MemOperand(sp, kPointerSize));
-            break;
-          case KEYED_PROPERTY:
-            __ str(r0, MemOperand(sp, 2 * kPointerSize));
-            break;
-        }
-        break;
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(r0);
+          break;
+        case NAMED_PROPERTY:
+          __ str(r0, MemOperand(sp, kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ str(r0, MemOperand(sp, 2 * kPointerSize));
+          break;
+      }
     }
   }
 
@@ -3037,31 +3007,31 @@
   switch (assign_type) {
     case VARIABLE:
       if (expr->is_postfix()) {
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               Expression::kEffect);
-        // For all contexts except kEffect: We have the result on
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+        }
+        // For all contexts except EffectContext, we have the result on
         // top of the stack.
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               context_);
+                               Token::ASSIGN);
       }
       break;
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, r0);
+        context()->Plug(r0);
       }
       break;
     }
@@ -3069,13 +3039,13 @@
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, r0);
+        context()->Plug(r0);
       }
       break;
     }
@@ -3083,7 +3053,9 @@
 }
 
 
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+  ASSERT(!context()->IsEffect());
+  ASSERT(!context()->IsTest());
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
     Comment cmnt(masm_, "Global variable");
@@ -3092,16 +3064,16 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    if (where == kStack) __ push(r0);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    context()->Plug(r0);
   } else if (proxy != NULL &&
-             proxy->var()->slot() != NULL &&
-             proxy->var()->slot()->type() == Slot::LOOKUP) {
+             proxy->var()->AsSlot() != NULL &&
+             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
@@ -3110,10 +3082,10 @@
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     __ bind(&done);
 
-    if (where == kStack) __ push(r0);
+    context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitForValue(expr, where);
+    Visit(expr);
   }
 }
 
@@ -3135,7 +3107,9 @@
   if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
   Handle<String> check = Handle<String>::cast(right_literal_value);
 
-  VisitForTypeofValue(left_unary->expression(), kAccumulator);
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(left_unary->expression());
+  }
   if (check->Equals(Heap::number_symbol())) {
     __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, if_true);
@@ -3221,8 +3195,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3230,14 +3204,14 @@
   Expression* left = expr->left();
   Expression* right = expr->right();
   if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
-    Apply(context_, if_true, if_false);
+    context()->Plug(if_true, if_false);
     return;
   }
 
-  VisitForValue(expr->left(), kStack);
+  VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_JS);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
@@ -3245,7 +3219,7 @@
       break;
 
     case Token::INSTANCEOF: {
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       InstanceofStub stub;
       __ CallStub(&stub);
       // The stub returns 0 for true.
@@ -3255,7 +3229,7 @@
     }
 
     default: {
-      VisitForValue(expr->right(), kAccumulator);
+      VisitForAccumulatorValue(expr->right());
       Condition cc = eq;
       bool strict = false;
       switch (op) {
@@ -3292,7 +3266,8 @@
           UNREACHABLE();
       }
 
-      if (ShouldInlineSmiCase(op)) {
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      if (inline_smi_code) {
         Label slow_case;
         __ orr(r2, r0, Operand(r1));
         __ BranchOnNotSmi(r2, &slow_case);
@@ -3300,8 +3275,10 @@
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
-
-      CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
+      CompareFlags flags = inline_smi_code
+          ? NO_SMI_COMPARE_IN_STUB
+          : NO_COMPARE_FLAGS;
+      CompareStub stub(cc, strict, flags, r1, r0);
       __ CallStub(&stub);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
       Split(cc, if_true, if_false, fall_through);
@@ -3310,7 +3287,7 @@
 
   // Convert the result of the comparison into one expected for this
   // expression's context.
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3320,10 +3297,10 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
-  VisitForValue(expr->expression(), kAccumulator);
+  VisitForAccumulatorValue(expr->expression());
   __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   if (expr->is_strict()) {
@@ -3342,20 +3319,31 @@
     __ cmp(r1, Operand(1 << Map::kIsUndetectable));
     Split(eq, if_true, if_false, fall_through);
   }
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  Apply(context_, r0);
+  context()->Plug(r0);
 }
 
 
-Register FullCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() {
+  return r0;
+}
 
 
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+  return cp;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ Call(ic, mode);
+}
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
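
The full-codegen changes above replace the old Apply(context_, ...) / VisitForValue(expr, kStack) pairs with polymorphic context objects: the visitor asks the current expression context to Plug the result wherever it belongs. A minimal, self-contained model of that pattern (the class names below mirror the diff but are illustrative, not V8's real classes):

#include <cstdio>
#include <vector>

class ExpressionContext {
 public:
  virtual ~ExpressionContext() {}
  // Plug the value the visitor just computed into whatever this context needs.
  virtual void Plug(int value) const = 0;
};

class EffectContext : public ExpressionContext {
 public:
  // Evaluated for side effects only: the value is dropped.
  virtual void Plug(int) const {}
};

class AccumulatorValueContext : public ExpressionContext {
 public:
  explicit AccumulatorValueContext(int* acc) : acc_(acc) {}
  // Like r0 on ARM: the value ends up in the accumulator register.
  virtual void Plug(int value) const { *acc_ = value; }
 private:
  int* acc_;
};

class StackValueContext : public ExpressionContext {
 public:
  explicit StackValueContext(std::vector<int>* stack) : stack_(stack) {}
  // Like the old 'if (where == kStack) __ push(r0)' branches.
  virtual void Plug(int value) const { stack_->push_back(value); }
 private:
  std::vector<int>* stack_;
};

int main() {
  int accumulator = 0;
  std::vector<int> stack;
  AccumulatorValueContext acc_ctx(&accumulator);
  StackValueContext stack_ctx(&stack);
  acc_ctx.Plug(42);    // VisitForAccumulatorValue
  stack_ctx.Plug(42);  // VisitForStackValue
  std::printf("acc=%d stack=%d\n", accumulator, stack.back());
  return 0;
}

The win is that each call site no longer branches on a 'where' flag; the context object carries that decision.
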
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 1a76db2..d5a700c 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -967,6 +967,14 @@
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(<bug#>): implement this.
+  return false;
+}
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
@@ -1236,7 +1244,6 @@
   //  -- r1     : receiver
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = r1;
   Register index = r0;
@@ -1251,7 +1258,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1259,10 +1266,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ Ret();
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
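
The char-at change above folds the dedicated index_out_of_range exit into the generic miss path, so every fast-path failure defers to one slow handler that can compute the correct answer, including undefined for an out-of-range index. A rough scalar sketch of that control-flow shape (plain C++, not ARM assembly; all names are invented):

#include <cstdio>
#include <string>

// Stand-in for the IC miss handler: the generic path can produce the right
// answer for every failure mode, including an out-of-range index.
static int SlowCharCodeAt(const std::string& s, long index) {
  if (index < 0 || index >= static_cast<long>(s.size())) return -1;  // "undefined"
  return static_cast<unsigned char>(s[index]);
}

// Fast path with a single shared failure exit, as in the patched stub.
static int FastCharCodeAt(const std::string& s, long index) {
  if (index < 0) goto miss;                             // not a valid index
  if (index >= static_cast<long>(s.size())) goto miss;  // out of range
  return static_cast<unsigned char>(s[index]);
 miss:
  return SlowCharCodeAt(s, index);
}

int main() {
  std::printf("%d %d\n", FastCharCodeAt("abc", 1), FastCharCodeAt("abc", 9));
  return 0;
}
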
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 3554431..0e2c49e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1242,15 +1242,6 @@
 }
 
 
-void MacroAssembler::StubReturn(int argc, Condition cond) {
-  ASSERT(argc >= 1 && generating_stub());
-  if (argc > 1) {
-    add(sp, sp, Operand((argc - 1) * kPointerSize), LeaveCC, cond);
-  }
-  Ret(cond);
-}
-
-
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     add(sp, sp, Operand(num_arguments * kPointerSize));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index febd87e..48a8059 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -531,9 +531,6 @@
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
-  // Return from a code stub after popping its arguments.
-  void StubReturn(int argc, Condition cond = al);
-
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 0da5f64..659f29c 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -266,7 +266,12 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ Move(ip, Top::global());
+  __ cmp(prototype, ip);
+  __ b(ne, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -1434,7 +1439,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r0);
+                                            r0,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
@@ -1505,7 +1511,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r0);
+                                            r0,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
@@ -1626,6 +1633,16 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // TODO(872): implement this.
+  return Heap::undefined_value();
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -1705,7 +1722,7 @@
         __ b(hs, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, r0);
+            masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1725,7 +1742,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, r0);
+            masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1748,7 +1765,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -2067,7 +2084,10 @@
                                              name,
                                              r1,
                                              &miss);
-    if (cell->IsFailure()) return cell;
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2117,7 +2137,10 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2212,11 +2235,11 @@
   }
 
   __ mov(r0, r4);
-  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
@@ -2265,7 +2288,10 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
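
Two things happen in the stub-cache changes: GenerateDirectLoadGlobalFunctionPrototype now takes a miss label so a stub compiled against one global context misses when run in another, and the early-return failure paths call miss.Unuse() first. The Unuse() calls matter because an assembler Label asserts in its (debug-mode) destructor that it was never left linked without being bound. A small standalone model of that invariant, using the simplified encoding the real Label uses (0 unused, greater than 0 linked, less than 0 bound):

#include <cassert>

class Label {
 public:
  Label() : pos_(0) {}
  ~Label() { assert(!is_linked()); }  // a dangling jump would be a bug
  void link_to(int pos) { pos_ = pos + 1; }
  void bind_to(int pos) { pos_ = -pos - 1; }
  void Unuse() { pos_ = 0; }
  bool is_linked() const { return pos_ > 0; }
 private:
  int pos_;
};

// Mirrors the compile functions above: on an early bail-out the label has
// been jumped to but will never be bound, so it must be Unuse()d first.
static bool CompileStub(bool bail_out) {
  Label miss;
  miss.link_to(4);   // fast path emits a branch to 'miss'
  if (bail_out) {
    miss.Unuse();    // abandon the half-built code; the destructor stays happy
    return false;
  }
  miss.bind_to(8);   // normal path binds the label
  return true;
}

int main() { return CompileStub(true) ? 1 : 0; }
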
diff --git a/src/assembler.h b/src/assembler.h
index 1577433..d28bf43 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -57,7 +57,7 @@
 
   INLINE(void Unuse())            { pos_ = 0; }
 
-  INLINE(bool is_bound()  const)  { return pos_ <  0; }
+  INLINE(bool is_bound() const)  { return pos_ <  0; }
   INLINE(bool is_unused() const)  { return pos_ == 0; }
   INLINE(bool is_linked() const)  { return pos_ >  0; }
 
@@ -92,6 +92,57 @@
 
 
 // -----------------------------------------------------------------------------
+// NearLabels are labels used for short jumps (in Intel jargon).
+// NearLabels should be used if it can be guaranteed that the jump range is
+// within -128 to +127. We already use short jumps when jumping backwards,
+// so using a NearLabel will only have performance impact if used for forward
+// jumps.
+class NearLabel BASE_EMBEDDED {
+ public:
+  NearLabel() { Unuse(); }
+  ~NearLabel() { ASSERT(!is_linked()); }
+
+  void Unuse() {
+    pos_ = -1;
+    unresolved_branches_ = 0;
+#ifdef DEBUG
+    for (int i = 0; i < kMaxUnresolvedBranches; i++) {
+      unresolved_positions_[i] = -1;
+    }
+#endif
+  }
+
+  int pos() {
+    ASSERT(is_bound());
+    return pos_;
+  }
+
+  bool is_bound() { return pos_ >= 0; }
+  bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
+  bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
+
+  void bind_to(int position) {
+    ASSERT(!is_bound());
+    pos_ = position;
+  }
+
+  void link_to(int position) {
+    ASSERT(!is_bound());
+    ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
+    unresolved_positions_[unresolved_branches_++] = position;
+  }
+
+ private:
+  static const int kMaxUnresolvedBranches = 8;
+  int pos_;
+  int unresolved_branches_;
+  int unresolved_positions_[kMaxUnresolvedBranches];
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
 // Relocation information
 
 
@@ -181,10 +232,10 @@
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
-  byte* pc() const  { return pc_; }
+  byte* pc() const { return pc_; }
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
-  intptr_t data() const  { return data_; }
+  intptr_t data() const { return data_; }
 
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
@@ -339,7 +390,7 @@
   explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
 
   // Iteration
-  bool done() const  { return done_; }
+  bool done() const { return done_; }
   void next();
 
   // Return pointer valid until next next().
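
For reference, here is the NearLabel bookkeeping from the class above exercised in isolation (plain assert replaces ASSERT and the Assembler friendship is dropped; a real assembler would patch each recorded branch position when the label is bound):

#include <cassert>
#include <cstdio>

class NearLabel {
 public:
  NearLabel() { Unuse(); }
  ~NearLabel() { assert(!is_linked()); }
  void Unuse() { pos_ = -1; unresolved_branches_ = 0; }
  bool is_bound() const { return pos_ >= 0; }
  bool is_linked() const { return !is_bound() && unresolved_branches_ > 0; }
  void link_to(int position) {
    assert(!is_bound());
    assert(unresolved_branches_ < kMaxUnresolvedBranches);
    unresolved_positions_[unresolved_branches_++] = position;
  }
  void bind_to(int position) {
    assert(!is_bound());
    pos_ = position;
    // A real assembler would now patch every recorded branch with the
    // guaranteed 8-bit offset pos_ - unresolved_positions_[i].
  }
  int pos() const {
    assert(is_bound());
    return pos_;
  }
 private:
  static const int kMaxUnresolvedBranches = 8;
  int pos_;
  int unresolved_branches_;
  int unresolved_positions_[kMaxUnresolvedBranches];
};

int main() {
  NearLabel done;
  done.link_to(10);  // forward short jump emitted at code offset 10
  done.link_to(25);  // and another at offset 25
  done.bind_to(40);  // both offsets stay within the -128..+127 range
  std::printf("bound at %d\n", done.pos());
  return 0;
}
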
diff --git a/src/ast.cc b/src/ast.cc
index 9ff1be7..f47dffd 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -70,6 +70,16 @@
 }
 
 
+VariableProxy::VariableProxy(Variable* var)
+    : name_(var->name()),
+      var_(NULL),  // Will be set by the call to BindTo.
+      is_this_(var->is_this()),
+      inside_with_(false),
+      is_trivial_(false) {
+  BindTo(var);
+}
+
+
 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
                              bool inside_with)
diff --git a/src/ast.h b/src/ast.h
index 9fcf256..eadb310 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -172,18 +172,6 @@
 
 class Expression: public AstNode {
  public:
-  enum Context {
-    // Not assigned a context yet, or else will not be visited during
-    // code generation.
-    kUninitialized,
-    // Evaluated for its side effects.
-    kEffect,
-    // Evaluated for its value (and side effects).
-    kValue,
-    // Evaluated for control flow (and side effects).
-    kTest
-  };
-
   Expression() : bitfields_(0) {}
 
   virtual Expression* AsExpression()  { return this; }
@@ -342,7 +330,7 @@
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
-  bool is_initializer_block() const  { return is_initializer_block_; }
+  bool is_initializer_block() const { return is_initializer_block_; }
 
  private:
   ZoneList<Statement*> statements_;
@@ -363,9 +351,9 @@
 
   virtual void Accept(AstVisitor* v);
 
-  VariableProxy* proxy() const  { return proxy_; }
-  Variable::Mode mode() const  { return mode_; }
-  FunctionLiteral* fun() const  { return fun_; }  // may be NULL
+  VariableProxy* proxy() const { return proxy_; }
+  Variable::Mode mode() const { return mode_; }
+  FunctionLiteral* fun() const { return fun_; }  // may be NULL
 
  private:
   VariableProxy* proxy_;
@@ -466,11 +454,11 @@
 
   virtual void Accept(AstVisitor* v);
 
-  Statement* init() const  { return init_; }
+  Statement* init() const { return init_; }
   void set_init(Statement* stmt) { init_ = stmt; }
-  Expression* cond() const  { return cond_; }
+  Expression* cond() const { return cond_; }
   void set_cond(Expression* expr) { cond_ = expr; }
-  Statement* next() const  { return next_; }
+  Statement* next() const { return next_; }
   void set_next(Statement* stmt) { next_ = stmt; }
 
   bool may_have_function_literal() const {
@@ -543,7 +531,7 @@
 
   virtual void Accept(AstVisitor* v);
 
-  IterationStatement* target() const  { return target_; }
+  IterationStatement* target() const { return target_; }
 
  private:
   IterationStatement* target_;
@@ -557,7 +545,7 @@
 
   virtual void Accept(AstVisitor* v);
 
-  BreakableStatement* target() const  { return target_; }
+  BreakableStatement* target() const { return target_; }
 
  private:
   BreakableStatement* target_;
@@ -588,7 +576,7 @@
 
   virtual void Accept(AstVisitor* v);
 
-  Expression* expression() const  { return expression_; }
+  Expression* expression() const { return expression_; }
 
   bool is_catch_block() const { return is_catch_block_; }
 
@@ -610,13 +598,13 @@
  public:
   CaseClause(Expression* label, ZoneList<Statement*>* statements);
 
-  bool is_default() const  { return label_ == NULL; }
-  Expression* label() const  {
+  bool is_default() const { return label_ == NULL; }
+  Expression* label() const {
     CHECK(!is_default());
     return label_;
   }
   JumpTarget* body_target() { return &body_target_; }
-  ZoneList<Statement*>* statements() const  { return statements_; }
+  ZoneList<Statement*>* statements() const { return statements_; }
 
  private:
   Expression* label_;
@@ -636,8 +624,8 @@
 
   virtual void Accept(AstVisitor* v);
 
-  Expression* tag() const  { return tag_; }
-  ZoneList<CaseClause*>* cases() const  { return cases_; }
+  Expression* tag() const { return tag_; }
+  ZoneList<CaseClause*>* cases() const { return cases_; }
 
  private:
   Expression* tag_;
@@ -731,8 +719,8 @@
 
   virtual void Accept(AstVisitor* v);
 
-  VariableProxy* catch_var() const  { return catch_var_; }
-  Block* catch_block() const  { return catch_block_; }
+  VariableProxy* catch_var() const { return catch_var_; }
+  Block* catch_block() const { return catch_block_; }
 
  private:
   VariableProxy* catch_var_;
@@ -960,6 +948,8 @@
 
 class VariableProxy: public Expression {
  public:
+  explicit VariableProxy(Variable* var);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
@@ -972,7 +962,10 @@
   }
 
   Variable* AsVariable() {
-    return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+    if (this == NULL || var_ == NULL) return NULL;
+    Expression* rewrite = var_->rewrite();
+    if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
+    return NULL;
   }
 
   virtual bool IsValidLeftHandSide() {
@@ -994,10 +987,10 @@
     return (variable == NULL) ? false : variable->is_arguments();
   }
 
-  Handle<String> name() const  { return name_; }
-  Variable* var() const  { return var_; }
-  bool is_this() const  { return is_this_; }
-  bool inside_with() const  { return inside_with_; }
+  Handle<String> name() const { return name_; }
+  Variable* var() const { return var_; }
+  bool is_this() const { return is_this_; }
+  bool inside_with() const { return inside_with_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
 
@@ -1464,9 +1457,9 @@
   // Type testing & conversion
   virtual FunctionLiteral* AsFunctionLiteral()  { return this; }
 
-  Handle<String> name() const  { return name_; }
-  Scope* scope() const  { return scope_; }
-  ZoneList<Statement*>* body() const  { return body_; }
+  Handle<String> name() const { return name_; }
+  Scope* scope() const { return scope_; }
+  ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
   int function_token_position() const { return function_token_position_; }
   int start_position() const { return start_position_; }
@@ -1486,7 +1479,7 @@
 
   bool AllowsLazyCompilation();
 
-  Handle<String> inferred_name() const  { return inferred_name_; }
+  Handle<String> inferred_name() const { return inferred_name_; }
   void set_inferred_name(Handle<String> inferred_name) {
     inferred_name_ = inferred_name;
   }
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 6e6c2c6..aa8d8e5 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1064,8 +1064,11 @@
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  SetProperty(builtins, Factory::LookupAsciiSymbol("global"),
-              Handle<Object>(global_context()->global()), attributes);
+  Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
+  SetProperty(builtins,
+              global_symbol,
+              Handle<Object>(global_context()->global()),
+              attributes);
 
   // Setup the reference from the global object to the builtins object.
   JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@@ -1344,33 +1347,41 @@
 }
 
 
-static void InstallCustomCallGenerator(
-    Handle<JSFunction> holder_function,
-    CallStubCompiler::CustomGeneratorOwner owner_flag,
-    const char* function_name,
-    int id) {
-  Handle<JSObject> owner;
-  if (owner_flag == CallStubCompiler::FUNCTION) {
-    owner = Handle<JSObject>::cast(holder_function);
-  } else {
-    ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
-    owner = Handle<JSObject>(
-        JSObject::cast(holder_function->instance_prototype()));
+static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+    Handle<Context> global_context,
+    const char* holder_expr) {
+  Handle<GlobalObject> global(global_context->global());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
   }
+  ASSERT_EQ(".prototype", period_pos);
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<JSFunction> function = Handle<JSFunction>::cast(
+      GetProperty(global, Factory::LookupSymbol(property)));
+  return Handle<JSObject>(JSObject::cast(function->prototype()));
+}
+
+
+static void InstallCustomCallGenerator(Handle<JSObject> holder,
+                                       const char* function_name,
+                                       int id) {
   Handle<String> name = Factory::LookupAsciiSymbol(function_name);
-  Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
+  Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
   function->shared()->set_function_data(Smi::FromInt(id));
 }
 
 
 void Genesis::InstallCustomCallGenerators() {
   HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name)    \
-  {                                                                       \
-    Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
-    const int id = CallStubCompiler::k##name##CallGenerator;              \
-    InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag,      \
-                               #fun_name, id);                            \
+#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name)     \
+  {                                                             \
+    Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
+        global_context(), #holder_expr);                        \
+    const int id = CallStubCompiler::k##name##CallGenerator;    \
+    InstallCustomCallGenerator(holder, #fun_name, id);          \
   }
   CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
 #undef INSTALL_CALL_GENERATOR
@@ -1405,8 +1416,14 @@
   Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
 
   int index = 0;
-#define F(size, func) caches->set(index++, CreateCache(size, func));
-    JSFUNCTION_RESULT_CACHE_LIST(F)
+
+#define F(size, func) do {                           \
+    FixedArray* cache = CreateCache((size), (func)); \
+    caches->set(index++, cache);                     \
+  } while (false)
+
+  JSFUNCTION_RESULT_CACHE_LIST(F);
+
 #undef F
 
   global_context()->set_jsfunction_result_caches(*caches);
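
ResolveCustomCallGeneratorHolder accepts either a bare global name ("Math") or a "Function.prototype" expression. A standalone sketch of just the string handling, with plain std::string standing in for V8 handles and property lookups:

#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>

static std::string ResolveHolder(const char* holder_expr) {
  const char* period_pos = std::strchr(holder_expr, '.');
  if (period_pos == NULL) {
    // Bare name: look it up directly on the global object.
    return std::string("global.") + holder_expr;
  }
  // Only "<Function>.prototype" expressions are supported.
  assert(std::strcmp(period_pos, ".prototype") == 0);
  std::string function(holder_expr, period_pos - holder_expr);
  return "global." + function + ".prototype";
}

int main() {
  std::printf("%s\n", ResolveHolder("Math").c_str());              // global.Math
  std::printf("%s\n", ResolveHolder("String.prototype").c_str());  // global.String.prototype
  return 0;
}
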
diff --git a/src/builtins.h b/src/builtins.h
index 7e49f31..b5e8c4e 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -65,6 +65,7 @@
 #define BUILTIN_LIST_A(V)                                                 \
   V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)                   \
   V(JSConstructCall,            BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructStubCountdown,   BUILTIN, UNINITIALIZED)                   \
   V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)                   \
   V(JSConstructStubApi,         BUILTIN, UNINITIALIZED)                   \
   V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)                   \
@@ -249,6 +250,7 @@
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
   static void Generate_JSConstructCall(MacroAssembler* masm);
+  static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 98a5cf6..912d43d 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -340,27 +340,40 @@
 };
 
 
+enum UnaryOpFlags {
+  NO_UNARY_FLAGS = 0,
+  NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
+};
+
+
 class GenericUnaryOpStub : public CodeStub {
  public:
   GenericUnaryOpStub(Token::Value op,
                      UnaryOverwriteMode overwrite,
+                     UnaryOpFlags flags,
                      NegativeZeroHandling negative_zero = kStrictNegativeZero)
-      : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
+      : op_(op),
+        overwrite_(overwrite),
+        include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
+        negative_zero_(negative_zero) { }
 
  private:
   Token::Value op_;
   UnaryOverwriteMode overwrite_;
+  bool include_smi_code_;
   NegativeZeroHandling negative_zero_;
 
   class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
-  class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
-  class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
+  class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
+  class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
+  class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
 
   Major MajorKey() { return GenericUnaryOp; }
   int MinorKey() {
     return OpField::encode(op_) |
-           OverwriteField::encode(overwrite_) |
-           NegativeZeroField::encode(negative_zero_);
+        OverwriteField::encode(overwrite_) |
+        IncludeSmiCodeField::encode(include_smi_code_) |
+        NegativeZeroField::encode(negative_zero_);
   }
 
   void Generate(MacroAssembler* masm);
@@ -375,22 +388,43 @@
 };
 
 
+// Flags that control the compare stub code generation.
+enum CompareFlags {
+  NO_COMPARE_FLAGS = 0,
+  NO_SMI_COMPARE_IN_STUB = 1 << 0,
+  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
+  CANT_BOTH_BE_NAN = 1 << 2
+};
+
+
 class CompareStub: public CodeStub {
  public:
   CompareStub(Condition cc,
               bool strict,
-              NaNInformation nan_info = kBothCouldBeNaN,
-              bool include_number_compare = true,
-              Register lhs = no_reg,
-              Register rhs = no_reg) :
+              CompareFlags flags,
+              Register lhs,
+              Register rhs) :
       cc_(cc),
       strict_(strict),
-      never_nan_nan_(nan_info == kCantBothBeNaN),
-      include_number_compare_(include_number_compare),
+      never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+      include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+      include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
       lhs_(lhs),
       rhs_(rhs),
       name_(NULL) { }
 
+  CompareStub(Condition cc,
+              bool strict,
+              CompareFlags flags) :
+      cc_(cc),
+      strict_(strict),
+      never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+      include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+      include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+      lhs_(no_reg),
+      rhs_(no_reg),
+      name_(NULL) { }
+
   void Generate(MacroAssembler* masm);
 
  private:
@@ -406,6 +440,10 @@
   // comparison code is used when the number comparison has been inlined, and
   // the stub will be called if one of the operands is not a number.
   bool include_number_compare_;
+
+  // Generate the comparison code for two smi operands in the stub.
+  bool include_smi_compare_;
+
   // Register holding the left hand side of the comparison if the stub gives
   // a choice, no_reg otherwise.
   Register lhs_;
@@ -413,12 +451,13 @@
   // a choice, no_reg otherwise.
   Register rhs_;
 
-  // Encoding of the minor key CCCCCCCCCCCCRCNS.
+  // Encoding of the minor key in 16 bits.
   class StrictField: public BitField<bool, 0, 1> {};
   class NeverNanNanField: public BitField<bool, 1, 1> {};
   class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
-  class RegisterField: public BitField<bool, 3, 1> {};
-  class ConditionField: public BitField<int, 4, 12> {};
+  class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
+  class RegisterField: public BitField<bool, 4, 1> {};
+  class ConditionField: public BitField<int, 5, 11> {};
 
   Major MajorKey() { return Compare; }
 
@@ -436,11 +475,13 @@
   const char* GetName();
 #ifdef DEBUG
   void Print() {
-    PrintF("CompareStub (cc %d), (strict %s), "
-           "(never_nan_nan %s), (number_compare %s) ",
+    PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
+           "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
+           MinorKey(),
            static_cast<int>(cc_),
            strict_ ? "true" : "false",
            never_nan_nan_ ? "true" : "false",
+           include_smi_compare_ ? "included" : "not included",
            include_number_compare_ ? "included" : "not included");
 
     if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
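
The CompareFlags bits replace the stub's old boolean parameters; each constructor decodes them into the three stored booleans. A quick self-contained illustration using the enum values above:

#include <cstdio>

enum CompareFlags {
  NO_COMPARE_FLAGS = 0,
  NO_SMI_COMPARE_IN_STUB = 1 << 0,
  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
  CANT_BOTH_BE_NAN = 1 << 2
};

struct DecodedFlags {
  bool never_nan_nan;
  bool include_number_compare;
  bool include_smi_compare;
};

static DecodedFlags Decode(CompareFlags flags) {
  DecodedFlags d;
  d.never_nan_nan = (flags & CANT_BOTH_BE_NAN) != 0;
  d.include_number_compare = (flags & NO_NUMBER_COMPARE_IN_STUB) == 0;
  d.include_smi_compare = (flags & NO_SMI_COMPARE_IN_STUB) == 0;
  return d;
}

int main() {
  // The ARM call site above passes NO_SMI_COMPARE_IN_STUB when the smi case
  // was already inlined, so the stub skips its own smi check.
  DecodedFlags d = Decode(NO_SMI_COMPARE_IN_STUB);
  std::printf("smi=%d number=%d nan=%d\n",
              d.include_smi_compare, d.include_number_compare, d.never_nan_nan);
  return 0;
}

Packing the options into one flags argument also makes call sites harder to get wrong than a run of positional bools.
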
diff --git a/src/codegen.cc b/src/codegen.cc
index 148cefc..92241d1 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -289,7 +289,7 @@
   for (int i = 0; i < length; i++) {
     Declaration* node = declarations->at(i);
     Variable* var = node->proxy()->var();
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
 
     // If it was not possible to allocate the variable at compile
     // time, we need to "declare" it at runtime to make sure it
@@ -310,7 +310,7 @@
   for (int j = 0, i = 0; i < length; i++) {
     Declaration* node = declarations->at(i);
     Variable* var = node->proxy()->var();
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
 
     if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
       // Skip - already processed.
@@ -344,40 +344,35 @@
 }
 
 
-// List of special runtime calls which are generated inline. For some of these
-// functions the code will be generated inline, and for others a call to a code
-// stub will be inlined.
+// Lookup table for code generators for special runtime calls which are
+// generated inline.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)          \
+    &CodeGenerator::Generate##Name,
 
-#define INLINE_RUNTIME_ENTRY(Name, argc, ressize)                             \
-    {&CodeGenerator::Generate##Name,  "_" #Name, argc},                       \
-
-CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
-  INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
+const CodeGenerator::InlineFunctionGenerator
+    CodeGenerator::kInlineFunctionGenerators[] = {
+        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
 };
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
 
-#undef INLINE_RUNTIME_ENTRY
 
-CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
-    Handle<String> name) {
-  const int entries_count =
-      sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
-  for (int i = 0; i < entries_count; i++) {
-    InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
-    if (name->IsEqualTo(CStrVector(entry->name))) {
-      return entry;
-    }
-  }
-  return NULL;
+CodeGenerator::InlineFunctionGenerator
+    CodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
+  return kInlineFunctionGenerators[
+      static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
 }
 
 
 bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
   ZoneList<Expression*>* args = node->arguments();
   Handle<String> name = node->name();
-  if (name->length() > 0 && name->Get(0) == '_') {
-    InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
-    if (entry != NULL) {
-      ((*this).*(entry->method))(args);
+  Runtime::Function* function = node->function();
+  if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
+    InlineFunctionGenerator generator =
+        FindInlineFunctionGenerator(function->function_id);
+    if (generator != NULL) {
+      ((*this).*(generator))(args);
       return true;
     }
   }
@@ -385,14 +380,6 @@
 }
 
 
-int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
-  CodeGenerator::InlineRuntimeLUT* f =
-      CodeGenerator::FindInlineRuntimeLUT(name);
-  if (f != NULL) return f->nargs;
-  return -1;
-}
-
-
 // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
 // known result for the test expression, with no side effects.
 CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
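
The codegen change swaps the old name-keyed linear search for a direct table lookup: inline-function ids are contiguous, so subtracting kFirstInlineFunction indexes straight into an array of member-function pointers. A toy model of that dispatch (Gen, its ids, and its generators are invented stand-ins):

#include <cstdio>

class Gen {
 public:
  typedef void (Gen::*Generator)(int argc);
  // Contiguous ids, mirroring Runtime::kFirstInlineFunction onwards.
  enum FunctionId { kFirstInline = 100, kIsSmi = 100, kIsArray = 101 };

  void GenerateIsSmi(int argc)   { std::printf("IsSmi/%d\n", argc); }
  void GenerateIsArray(int argc) { std::printf("IsArray/%d\n", argc); }

  Generator FindGenerator(FunctionId id) {
    static const Generator kGenerators[] = {
      &Gen::GenerateIsSmi,    // id 100
      &Gen::GenerateIsArray,  // id 101
    };
    // Constant-time lookup instead of comparing names.
    return kGenerators[static_cast<int>(id) - static_cast<int>(kFirstInline)];
  }
};

int main() {
  Gen cgen;
  Gen::Generator generator = cgen.FindGenerator(Gen::kIsArray);
  ((cgen).*(generator))(1);  // same dispatch as ((*this).*(generator))(args)
  return 0;
}
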
diff --git a/src/codegen.h b/src/codegen.h
index aa2d442..2a4d9d4 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,48 +71,6 @@
 //   CodeForDoWhileConditionPosition
 //   CodeForSourcePosition
 
-
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
-  F(IsSmi, 1, 1)                                                             \
-  F(IsNonNegativeSmi, 1, 1)                                                  \
-  F(IsArray, 1, 1)                                                           \
-  F(IsRegExp, 1, 1)                                                          \
-  F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
-  F(IsConstructCall, 0, 1)                                                   \
-  F(ArgumentsLength, 0, 1)                                                   \
-  F(Arguments, 1, 1)                                                         \
-  F(ClassOf, 1, 1)                                                           \
-  F(ValueOf, 1, 1)                                                           \
-  F(SetValueOf, 2, 1)                                                        \
-  F(StringCharCodeAt, 2, 1)                                                  \
-  F(StringCharFromCode, 1, 1)                                                \
-  F(StringCharAt, 2, 1)                                                      \
-  F(ObjectEquals, 2, 1)                                                      \
-  F(Log, 3, 1)                                                               \
-  F(RandomHeapNumber, 0, 1)                                                  \
-  F(IsObject, 1, 1)                                                          \
-  F(IsFunction, 1, 1)                                                        \
-  F(IsUndetectableObject, 1, 1)                                              \
-  F(IsSpecObject, 1, 1)                                                      \
-  F(IsStringWrapperSafeForDefaultValueOf, 1, 1)                              \
-  F(StringAdd, 2, 1)                                                         \
-  F(SubString, 3, 1)                                                         \
-  F(StringCompare, 2, 1)                                                     \
-  F(RegExpExec, 4, 1)                                                        \
-  F(RegExpConstructResult, 3, 1)                                             \
-  F(RegExpCloneResult, 1, 1)                                                 \
-  F(GetFromCache, 2, 1)                                                      \
-  F(NumberToString, 1, 1)                                                    \
-  F(SwapElements, 3, 1)                                                      \
-  F(MathPow, 2, 1)                                                           \
-  F(MathSin, 1, 1)                                                           \
-  F(MathCos, 1, 1)                                                           \
-  F(MathSqrt, 1, 1)                                                          \
-  F(IsRegExpEquivalent, 2, 1)                                                \
-  F(HasCachedArrayIndex, 1, 1)                                               \
-  F(GetCachedArrayIndex, 1, 1)
-
-
 #if V8_TARGET_ARCH_IA32
 #include "ia32/codegen-ia32.h"
 #elif V8_TARGET_ARCH_X64
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 7402e68..b0449c4 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -110,6 +110,9 @@
   void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
 
  private:
+  MUST_USE_RESULT Object* TryTablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source, Handle<SharedFunctionInfo> function_info);
@@ -137,6 +140,12 @@
            Handle<SharedFunctionInfo> function_info);
 
  private:
+  MUST_USE_RESULT Object* TryTablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
+
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source,
@@ -159,6 +168,10 @@
            JSRegExp::Flags flags,
            Handle<FixedArray> data);
  private:
+  MUST_USE_RESULT Object* TryTablePut(Handle<String> source,
+                                      JSRegExp::Flags flags,
+                                      Handle<FixedArray> data);
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(Handle<String> source,
                                          JSRegExp::Flags flags,
@@ -320,11 +333,18 @@
 }
 
 
+Object* CompilationCacheScript::TryTablePut(
+    Handle<String> source,
+    Handle<SharedFunctionInfo> function_info) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->Put(*source, *function_info);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
     Handle<String> source,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
 }
 
 
@@ -366,13 +386,20 @@
 }
 
 
+Object* CompilationCacheEval::TryTablePut(
+    Handle<String> source,
+    Handle<Context> context,
+    Handle<SharedFunctionInfo> function_info) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->PutEval(*source, *context, *function_info);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
-                                              *context,
-                                              *function_info),
+  CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
                      CompilationCacheTable);
 }
 
@@ -415,12 +442,20 @@
 }
 
 
+Object* CompilationCacheRegExp::TryTablePut(
+    Handle<String> source,
+    JSRegExp::Flags flags,
+    Handle<FixedArray> data) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->PutRegExp(*source, flags, *data);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
     Handle<String> source,
     JSRegExp::Flags flags,
     Handle<FixedArray> data) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
 }
 
 
diff --git a/src/compiler.cc b/src/compiler.cc
index f65f941..825198e 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -120,8 +120,9 @@
   Handle<Context> context = Handle<Context>::null();
   Handle<Code> code = MakeCode(context, info);
   if (!info->shared_info().is_null()) {
-    info->shared_info()->set_scope_info(
-        *SerializedScopeInfo::Create(info->scope()));
+    Handle<SerializedScopeInfo> scope_info =
+        SerializedScopeInfo::Create(info->scope());
+    info->shared_info()->set_scope_info(*scope_info);
   }
   return code;
 }
@@ -420,10 +421,12 @@
 
   // Update the shared function info with the compiled code and the scope info.
   // Please note, that the order of the sharedfunction initialization is
-  // important since set_scope_info might trigger a GC, causing the ASSERT
-  // below to be invalid if the code was flushed. By settting the code
+  // important since SerializedScopeInfo::Create might trigger a GC, causing
+  // the ASSERT below to be invalid if the code was flushed. By setting the code
   // object last we avoid this.
-  shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
+  Handle<SerializedScopeInfo> scope_info =
+      SerializedScopeInfo::Create(info->scope());
+  shared->set_scope_info(*scope_info);
   shared->set_code(*code);
   if (!info->closure().is_null()) {
     info->closure()->set_code(*code);
diff --git a/src/conversions.cc b/src/conversions.cc
index 90cdc77..f15a804 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -956,8 +956,9 @@
 
 
 char* DoubleToExponentialCString(double value, int f) {
+  const int kMaxDigitsAfterPoint = 20;
   // f might be -1 to signal that f was undefined in JavaScript.
-  ASSERT(f >= -1 && f <= 20);
+  ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
 
   bool negative = false;
   if (value < 0) {
@@ -969,29 +970,60 @@
   int decimal_point;
   int sign;
   char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // f corresponds to the digits after the decimal point. There is always one
+  // digit before the point, so the number of requested_digits is f + 1.
+  // One extra character is needed for the terminating null.
+  const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
+  // Make sure that the buffer is big enough, even if we fall back to the
+  // shortest representation (which happens when f equals -1).
+  ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
   if (f == -1) {
-    decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
-    f = StrLength(decimal_rep) - 1;
+    if (DoubleToAscii(value, DTOA_SHORTEST, 0,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      f = decimal_rep_length - 1;
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      f = decimal_rep_length - 1;
+      used_gay_dtoa = true;
+    }
   } else {
-    decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+    if (DoubleToAscii(value, DTOA_PRECISION, f + 1,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      used_gay_dtoa = true;
+    }
   }
-  int decimal_rep_length = StrLength(decimal_rep);
   ASSERT(decimal_rep_length > 0);
   ASSERT(decimal_rep_length <= f + 1);
-  USE(decimal_rep_length);
 
   int exponent = decimal_point - 1;
   char* result =
       CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
 
-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }
 
   return result;
 }
 
 
 char* DoubleToPrecisionCString(double value, int p) {
-  ASSERT(p >= 1 && p <= 21);
+  const int kMinimalDigits = 1;
+  const int kMaximalDigits = 21;
+  ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
+  USE(kMinimalDigits);
 
   bool negative = false;
   if (value < 0) {
@@ -1002,8 +1034,22 @@
   // Find a sufficiently precise decimal representation of n.
   int decimal_point;
   int sign;
-  char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
-  int decimal_rep_length = StrLength(decimal_rep);
+  char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // Add one for the terminating null character.
+  const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
+  if (DoubleToAscii(value, DTOA_PRECISION, p,
+                    Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                    &sign, &decimal_rep_length, &decimal_point)) {
+    decimal_rep = v8_dtoa_buffer;
+  } else {
+    decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+    decimal_rep_length = StrLength(decimal_rep);
+    used_gay_dtoa = true;
+  }
   ASSERT(decimal_rep_length <= p);
 
   int exponent = decimal_point - 1;
@@ -1047,7 +1093,9 @@
     result = builder.Finalize();
   }
 
-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }
   return result;
 }
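
Both conversion routines above now follow the same shape: try the fast algorithm into a stack buffer first, and fall back to Gay's dtoa (remembering to freedtoa) only when the fast path bails out. A self-contained sketch of that fallback pattern with invented helpers:

#include <cstdio>
#include <cstdlib>

// Fast path: handles only "easy" values, writing into a caller buffer.
static bool FastToDecimal(double v, char* buffer, unsigned capacity) {
  if (capacity < 24) return false;
  if (v != static_cast<double>(static_cast<long>(v))) return false;
  std::sprintf(buffer, "%ld", static_cast<long>(v));
  return true;
}

// Slow fallback: allocates, so the caller must free (cf. freedtoa).
static char* SlowToDecimal(double v) {
  char* p = static_cast<char*>(std::malloc(32));
  std::sprintf(p, "%.17g", v);
  return p;
}

int main() {
  char stack_buffer[32];
  char* heap_rep = NULL;
  const char* rep;
  if (FastToDecimal(3.0, stack_buffer, sizeof(stack_buffer))) {
    rep = stack_buffer;
  } else {
    heap_rep = SlowToDecimal(3.0);  // cf. used_gay_dtoa = true
    rep = heap_rep;
  }
  std::printf("%s\n", rep);
  std::free(heap_rep);  // only the fallback path allocated anything
  return 0;
}
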
 
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index cb7fdd8..5df5893 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -82,14 +82,11 @@
 
 bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
     Logger::LogEventsAndTags tag) {
-  // In browser mode, leave only callbacks and non-native JS entries.
-  // We filter out regular expressions as currently we can't tell
-  // whether they origin from native scripts, so let's not confise people by
-  // showing them weird regexes they didn't wrote.
   return FLAG_prof_browser_mode
       && (tag != Logger::CALLBACK_TAG
           && tag != Logger::FUNCTION_TAG
           && tag != Logger::LAZY_COMPILE_TAG
+          && tag != Logger::REG_EXP_TAG
           && tag != Logger::SCRIPT_TAG);
 }
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 4248a64..acf3349 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -32,6 +32,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 #include "frames-inl.h"
+#include "hashmap.h"
 #include "log-inl.h"
 
 #include "../include/v8-profiler.h"
@@ -50,7 +51,13 @@
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
-      enqueue_order_(0) {
+      enqueue_order_(0),
+      known_functions_(new HashMap(AddressesMatch)) {
+}
+
+
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+  delete known_functions_;
 }
 
 
@@ -152,16 +159,32 @@
   rec->entry = generator_->NewCodeEntry(security_token_id);
   rec->code_start = start;
   events_buffer_.Enqueue(evt_rec);
+
+  known_functions_->Lookup(alias, AddressHash(alias), true);
 }
 
 
 void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
   CodeMoveEvent(from, to);
+
+  if (IsKnownFunction(from)) {
+    known_functions_->Remove(from, AddressHash(from));
+    known_functions_->Lookup(to, AddressHash(to), true);
+  }
 }
 
 
 void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
   CodeDeleteEvent(from);
+
+  known_functions_->Remove(from, AddressHash(from));
+}
+
+
+bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
+  HashMap::Entry* entry =
+      known_functions_->Lookup(start, AddressHash(start), false);
+  return entry != NULL;
 }
 
 
@@ -403,6 +426,40 @@
 }
 
 
+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
+                                              HeapObject* source) {
+  // This function is called from GC iterators (during Scavenge,
+  // MC, and MS), so marking bits can be set on objects. That's
+  // why unchecked accessors are used here.
+
+  // The same function can be reported several times.
+  if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
+      || singleton_->processor_->IsKnownFunction(function->address())) return;
+
+  int security_token_id = TokenEnumerator::kNoSecurityToken;
+  // In debug mode, assertions may fail for contexts,
+  // and we can live without security tokens in debug mode.
+#ifndef DEBUG
+  if (function->unchecked_context()->IsContext()) {
+    security_token_id = singleton_->token_enumerator_->GetTokenId(
+        function->context()->global_context()->security_token());
+  }
+  // Security token may not be moved yet.
+  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
+    JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
+    if (old_function->unchecked_context()->IsContext()) {
+      security_token_id = singleton_->token_enumerator_->GetTokenId(
+          old_function->context()->global_context()->security_token());
+    }
+  }
+#endif
+  singleton_->processor_->FunctionCreateEvent(
+      function->address(),
+      function->unchecked_code()->address(),
+      security_token_id);
+}
+
+
 void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
   singleton_->processor_->FunctionMoveEvent(from, to);
 }
@@ -473,7 +530,12 @@
     processor_->Start();
     // Enumerate stuff we already have in the heap.
     if (Heap::HasBeenSetup()) {
-      Logger::LogCodeObjects();
+      if (!FLAG_prof_browser_mode) {
+        bool saved_log_code_flag = FLAG_log_code;
+        FLAG_log_code = true;
+        Logger::LogCodeObjects();
+        FLAG_log_code = saved_log_code_flag;
+      }
       Logger::LogCompiledFunctions();
       Logger::LogFunctionObjects();
       Logger::LogAccessorCallbacks();
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 4d5559e..86f9f67 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -41,6 +41,7 @@
 class CodeMap;
 class CpuProfile;
 class CpuProfilesCollection;
+class HashMap;
 class ProfileGenerator;
 class TokenEnumerator;
 
@@ -132,7 +133,7 @@
 class ProfilerEventsProcessor : public Thread {
  public:
   explicit ProfilerEventsProcessor(ProfileGenerator* generator);
-  virtual ~ProfilerEventsProcessor() { }
+  virtual ~ProfilerEventsProcessor();
 
   // Thread control.
   virtual void Run();
@@ -163,6 +164,7 @@
                              Address start, unsigned size);
   // Puts current stack into tick sample events buffer.
   void AddCurrentStack();
+  bool IsKnownFunction(Address start);
 
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
@@ -183,6 +185,13 @@
   bool ProcessTicks(unsigned dequeue_order);
 
   INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+  INLINE(static bool AddressesMatch(void* key1, void* key2)) {
+    return key1 == key2;
+  }
+  INLINE(static uint32_t AddressHash(Address addr)) {
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
+  }
 
   ProfileGenerator* generator_;
   bool running_;
@@ -190,6 +199,9 @@
   SamplingCircularQueue ticks_buffer_;
   UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
   unsigned enqueue_order_;
+
+  // Used from the VM thread.
+  HashMap* known_functions_;
 };
 
 } }  // namespace v8::internal
@@ -242,6 +254,10 @@
   static void CodeMoveEvent(Address from, Address to);
   static void CodeDeleteEvent(Address from);
   static void FunctionCreateEvent(JSFunction* function);
+  // Reports function creation in case we had missed it (e.g.
+  // if it was created from compiled code).
+  static void FunctionCreateEventFromMove(JSFunction* function,
+                                          HeapObject* source);
   static void FunctionMoveEvent(Address from, Address to);
   static void FunctionDeleteEvent(Address from);
   static void GetterCallbackEvent(String* name, Address entry_point);
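
The known_functions_ map above is used purely as an address set owned by the VM thread: created functions are added, moved ones re-keyed, deleted ones removed. The same idea in miniature, with std::set standing in for V8's HashMap:

#include <cstdio>
#include <set>

typedef unsigned char* Address;

class KnownFunctions {
 public:
  void Add(Address addr) { set_.insert(addr); }
  void Remove(Address addr) { set_.erase(addr); }
  bool Contains(Address addr) const { return set_.count(addr) != 0; }
  // Mirrors FunctionMoveEvent: only re-key entries we actually track.
  void Move(Address from, Address to) {
    if (Contains(from)) {
      Remove(from);
      Add(to);
    }
  }
 private:
  std::set<Address> set_;
};

int main() {
  unsigned char code[2];
  KnownFunctions known;
  known.Add(&code[0]);             // FunctionCreateEvent
  known.Move(&code[0], &code[1]);  // FunctionMoveEvent
  std::printf("%d %d\n",
              static_cast<int>(known.Contains(&code[0])),
              static_cast<int>(known.Contains(&code[1])));
  return 0;
}

This is what lets FunctionCreateEventFromMove skip functions that were already reported.
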
diff --git a/src/data-flow.cc b/src/data-flow.cc
index d480c1b..44a1050 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -125,7 +125,7 @@
 int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
   ASSERT(var != NULL);
   ASSERT(var->IsStackAllocated());
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   if (slot->type() == Slot::PARAMETER) {
     return slot->index();
   } else {
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 0b02e21..34eb0f0 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -45,7 +45,7 @@
                      ScriptCollected: 6 };
 
 // Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { All : 0,
+Debug.ExceptionBreak = { Caught : 0,
                          Uncaught: 1 };
 
 // The different types of steps.
@@ -87,7 +87,27 @@
       this.value = !!value;
       %SetDisableBreak(!this.value);
     }
-  }
+  },
+  breakOnCaughtException: {
+    getValue: function() { return Debug.isBreakOnException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnException();
+      } else {
+        Debug.clearBreakOnException();
+      }
+    }
+  },
+  breakOnUncaughtException: {
+    getValue: function() { return Debug.isBreakOnUncaughtException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnUncaughtException();
+      } else {
+        Debug.clearBreakOnUncaughtException();
+      }
+    }
+  },
 };
 
 
@@ -781,11 +801,15 @@
 }
 
 Debug.setBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
 };
 
 Debug.clearBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
+};
+
+Debug.isBreakOnException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
 };
 
 Debug.setBreakOnUncaughtException = function() {
@@ -796,6 +820,10 @@
   return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
 };
 
+Debug.isBreakOnUncaughtException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
+};
+
 Debug.showBreakPoints = function(f, full) {
   if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
   var source = full ? this.scriptSource(f) : this.source(f);
diff --git a/src/debug.cc b/src/debug.cc
index 87780d3..53773ac 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1034,10 +1034,12 @@
   if (!break_point_object->IsJSObject()) return true;
 
   // Get the function CheckBreakPoint (defined in debug.js).
+  Handle<String> is_break_point_triggered_symbol =
+      Factory::LookupAsciiSymbol("IsBreakPointTriggered");
   Handle<JSFunction> check_break_point =
     Handle<JSFunction>(JSFunction::cast(
-      debug_context()->global()->GetProperty(
-          *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
+        debug_context()->global()->GetProperty(
+            *is_break_point_triggered_symbol)));
 
   // Get the break id as an object.
   Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
@@ -1200,6 +1202,15 @@
 }
 
 
+bool Debug::IsBreakOnException(ExceptionBreakType type) {
+  if (type == BreakUncaughtException) {
+    return break_on_uncaught_exception_;
+  } else {
+    return break_on_exception_;
+  }
+}
+
+
 void Debug::PrepareStep(StepAction step_action, int step_count) {
   HandleScope scope;
   ASSERT(Debug::InDebugger());
@@ -2167,9 +2178,11 @@
   // script. Make sure that these break points are set.
 
   // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
+  Handle<String> update_script_break_points_symbol =
+      Factory::LookupAsciiSymbol("UpdateScriptBreakPoints");
   Handle<Object> update_script_break_points =
       Handle<Object>(Debug::debug_context()->global()->GetProperty(
-          *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
+          *update_script_break_points_symbol));
   if (!update_script_break_points->IsJSFunction()) {
     return;
   }
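
With the IsBreakOnException getter added above, the caught/uncaught break state (surfaced to debug-debugger.js as breakOnCaughtException and breakOnUncaughtException) reduces to two independent flags with symmetric read and write paths. A compact model (DebugModel is an invented stand-in for the static Debug interface):

#include <cstdio>

enum ExceptionBreakType { BreakCaughtException, BreakUncaughtException };

class DebugModel {
 public:
  DebugModel() : break_on_exception_(false), break_on_uncaught_exception_(false) {}
  void ChangeBreakOnException(ExceptionBreakType type, bool enable) {
    if (type == BreakUncaughtException) break_on_uncaught_exception_ = enable;
    else break_on_exception_ = enable;
  }
  bool IsBreakOnException(ExceptionBreakType type) const {
    return type == BreakUncaughtException ? break_on_uncaught_exception_
                                          : break_on_exception_;
  }
 private:
  bool break_on_exception_;            // "Caught": break on every throw
  bool break_on_uncaught_exception_;
};

int main() {
  DebugModel debug;
  debug.ChangeBreakOnException(BreakUncaughtException, true);
  std::printf("caught=%d uncaught=%d\n",
              debug.IsBreakOnException(BreakCaughtException),
              debug.IsBreakOnException(BreakUncaughtException));
  return 0;
}
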
diff --git a/src/debug.h b/src/debug.h
index 8b3b29e..0d63085 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -236,6 +236,7 @@
   static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
   static void FloodHandlerWithOneShot();
   static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+  static bool IsBreakOnException(ExceptionBreakType type);
   static void PrepareStep(StepAction step_action, int step_count);
   static void ClearStepping();
   static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
diff --git a/src/dtoa.cc b/src/dtoa.cc
index e3dcbf2..f4141eb 100644
--- a/src/dtoa.cc
+++ b/src/dtoa.cc
@@ -65,11 +65,12 @@
 
   switch (mode) {
     case DTOA_SHORTEST:
-      return FastDtoa(v, buffer, length, point);
+      return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
     case DTOA_FIXED:
       return FastFixedDtoa(v, requested_digits, buffer, length, point);
-    default:
-      break;
+    case DTOA_PRECISION:
+      return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+                      buffer, length, point);
   }
   return false;
 }
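
With the DTOA_PRECISION case added, the switch is exhaustive over the mode enum and the default arm is gone, so a future enum value will provoke a compiler warning instead of silently falling through to the slow path. For intuition only, the three modes' contracts roughly parallel the standard formatting controls below (a standalone sketch; V8's fast path does not go through printf):

    #include <cstdio>

    int main() {
      double v = 0.1;
      char buf[3][32];
      // DTOA_SHORTEST: fewest digits that still round-trip ("%.17g" round-trips
      // but does not minimize the digit count the way Grisu3 does).
      std::snprintf(buf[0], sizeof buf[0], "%.17g", v);
      std::snprintf(buf[1], sizeof buf[1], "%.3f", v);  // DTOA_FIXED: digits after the point
      std::snprintf(buf[2], sizeof buf[2], "%.5g", v);  // DTOA_PRECISION: significant digits
      std::printf("%s | %s | %s\n", buf[0], buf[1], buf[2]);
    }
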
diff --git a/src/fast-dtoa.cc b/src/fast-dtoa.cc
index b4b7be0..d2a00cc 100644
--- a/src/fast-dtoa.cc
+++ b/src/fast-dtoa.cc
@@ -42,8 +42,8 @@
 //
 // A different range might be chosen on a different platform, to optimize digit
 // generation, but a smaller range requires more powers of ten to be cached.
-static const int minimal_target_exponent = -60;
-static const int maximal_target_exponent = -32;
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
 
 
 // Adjusts the last digit of the generated number, and screens out generated
@@ -61,13 +61,13 @@
 // Output: returns true if the buffer is guaranteed to contain the closest
 //    representable number to the input.
 //  Modifies the generated digits in the buffer to approach (round towards) w.
-bool RoundWeed(Vector<char> buffer,
-               int length,
-               uint64_t distance_too_high_w,
-               uint64_t unsafe_interval,
-               uint64_t rest,
-               uint64_t ten_kappa,
-               uint64_t unit) {
+static bool RoundWeed(Vector<char> buffer,
+                      int length,
+                      uint64_t distance_too_high_w,
+                      uint64_t unsafe_interval,
+                      uint64_t rest,
+                      uint64_t ten_kappa,
+                      uint64_t unit) {
   uint64_t small_distance = distance_too_high_w - unit;
   uint64_t big_distance = distance_too_high_w + unit;
   // Let w_low  = too_high - big_distance, and
@@ -75,7 +75,7 @@
   // Note: w_low < w < w_high
   //
   // The real w (* unit) must lie somewhere inside the interval
-  // ]w_low; w_low[ (often written as "(w_low; w_low)")
+  // ]w_low; w_high[ (often written as "(w_low; w_high)")
 
   // Basically the buffer currently contains a number in the unsafe interval
   // ]too_low; too_high[ with too_low < w < too_high
@@ -122,10 +122,10 @@
   // inside the safe interval then we simply do not know and bail out (returning
   // false).
   //
-  // Similarly we have to take into account the imprecision of 'w' when rounding
-  // the buffer. If we have two potential representations we need to make sure
-  // that the chosen one is closer to w_low and w_high since v can be anywhere
-  // between them.
+  // Similarly we have to take into account the imprecision of 'w' when finding
+  // the closest representation of 'w'. If we have two potential
+  // representations, and one is closer to both w_low and w_high, then we know
+  // it is closer to the actual value v.
   //
   // By generating the digits of too_high we got the largest (closest to
   // too_high) buffer that is still in the unsafe interval. In the case where
@@ -139,6 +139,9 @@
   //              (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
   // Instead of using the buffer directly we use its distance to too_high.
   // Conceptually rest ~= too_high - buffer
+  // We need to do the following tests in this order to avoid over- and
+  // underflows.
+  ASSERT(rest <= unsafe_interval);
   while (rest < small_distance &&  // Negated condition 1
          unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
          (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
@@ -166,6 +169,62 @@
 }
 
 
+// Rounds the buffer upwards, by possibly adding 1 to it, if the result is
+// then closer to v. If the precision of the calculation is not sufficient
+// to round correctly, false is returned.
+// The rounding might shift the whole buffer, in which case kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+                             int length,
+                             uint64_t rest,
+                             uint64_t ten_kappa,
+                             uint64_t unit,
+                             int* kappa) {
+  ASSERT(rest < ten_kappa);
+  // The following tests are done in a specific order to avoid overflows. They
+  // will work correctly with any uint64 values of rest < ten_kappa and unit.
+  //
+  // If the unit is too big, then we don't know which way to round. For example
+  // a unit of 50 means that the real number lies within rest +/- 50. If
+  // 10^kappa == 40 then there is no way to tell which way to round.
+  if (unit >= ten_kappa) return false;
+  // Even if unit is just half the size of 10^kappa we are already completely
+  // lost. (And after the previous test we know that the expression will not
+  // over/underflow.)
+  if (ten_kappa - unit <= unit) return false;
+  // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+  if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+    return true;
+  }
+  // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+  if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+    // Increment the last digit and propagate the carry until we find a
+    // digit that is not '9'.
+    buffer[length - 1]++;
+    for (int i = length - 1; i > 0; --i) {
+      if (buffer[i] != '0' + 10) break;
+      buffer[i] = '0';
+      buffer[i - 1]++;
+    }
+    // If the first digit is now '0' + 10 we had a buffer with all '9's. With the
+    // exception of the first digit all digits are now '0'. Simply switch the
+    // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+    // the power (the kappa) is increased.
+    if (buffer[0] == '0' + 10) {
+      buffer[0] = '1';
+      (*kappa) += 1;
+    }
+    return true;
+  }
+  return false;
+}
+
 
 static const uint32_t kTen4 = 10000;
 static const uint32_t kTen5 = 100000;
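
The carry propagation in RoundWeedCounted is the subtle step: rounding up an all-'9' buffer shifts the whole number and bumps kappa. A standalone replay of exactly that branch ("99", kappa = 3 becomes "10", kappa = 4):

    #include <cstdio>

    // Mirrors the round-up step of RoundWeedCounted above: increment the last
    // digit, propagate the carry, and bump kappa if the buffer was all '9's.
    static void RoundUp(char* buffer, int length, int* kappa) {
      buffer[length - 1]++;
      for (int i = length - 1; i > 0; --i) {
        if (buffer[i] != '0' + 10) break;
        buffer[i] = '0';
        buffer[i - 1]++;
      }
      if (buffer[0] == '0' + 10) {
        buffer[0] = '1';
        (*kappa) += 1;
      }
    }

    int main() {
      char buffer[] = "99";
      int kappa = 3;
      RoundUp(buffer, 2, &kappa);
      std::printf("%s, kappa = %d\n", buffer, kappa);  // prints: 10, kappa = 4
    }
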
@@ -178,7 +237,7 @@
 // number. We furthermore receive the maximum number of bits 'number' has.
 // If number_bits == 0 then 0^-1 is returned
 // The number of bits must be <= 32.
-// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)).
+// Precondition: number < (1 << (number_bits + 1)).
 static void BiggestPowerTen(uint32_t number,
                             int number_bits,
                             uint32_t* power,
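
For reference, a naive standalone equivalent of what BiggestPowerTen computes; the real function selects among cached powers (kTen4, kTen5, ...) using number_bits instead of looping:

    #include <cstdint>
    #include <cstdio>

    // Largest power of ten <= number, for number >= 1.
    static void BiggestPowerTenNaive(uint32_t number, uint32_t* power, int* exponent) {
      uint32_t p = 1;
      int e = 0;
      while (p <= number / 10) {  // dividing number avoids uint32_t overflow
        p *= 10;
        ++e;
      }
      *power = p;
      *exponent = e;
    }

    int main() {
      uint32_t power;
      int exponent;
      BiggestPowerTenNaive(1234567, &power, &exponent);
      std::printf("%u = 10^%d\n", power, exponent);  // 1000000 = 10^6
    }
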
@@ -281,18 +340,18 @@
 
 // Generates the digits of input number w.
 // w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by minimal_target_exponent and
-// maximal_target_exponent.
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
 //       Hence -60 <= w.e() <= -32.
 //
 // Returns false if it fails, in which case the generated digits in the buffer
 // should not be used.
 // Preconditions:
 //  * low, w and high are correct up to 1 ulp (unit in the last place). That
-//    is, their error must be less that a unit of their last digits.
+//    is, their error must be less than a unit of their last digits.
 //  * low.e() == w.e() == high.e()
 //  * low < w < high, and taking into account their error: low~ <= high~
-//  * minimal_target_exponent <= w.e() <= maximal_target_exponent
+//  * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
 // Postconditions: returns false if procedure fails.
 //   otherwise:
 //     * buffer is not null-terminated, but len contains the number of digits.
@@ -321,15 +380,15 @@
 // represent 'w' we can stop. Everything inside the interval low - high
 // represents w. However we have to pay attention to low, high and w's
 // imprecision.
-bool DigitGen(DiyFp low,
-              DiyFp w,
-              DiyFp high,
-              Vector<char> buffer,
-              int* length,
-              int* kappa) {
+static bool DigitGen(DiyFp low,
+                     DiyFp w,
+                     DiyFp high,
+                     Vector<char> buffer,
+                     int* length,
+                     int* kappa) {
   ASSERT(low.e() == w.e() && w.e() == high.e());
   ASSERT(low.f() + 1 <= high.f() - 1);
-  ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
   // low, w and high are imprecise, but by less than one ulp (unit in the last
   // place).
   // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@@ -359,23 +418,23 @@
   uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
   // Modulo by one is an and.
   uint64_t fractionals = too_high.f() & (one.f() - 1);
-  uint32_t divider;
-  int divider_exponent;
+  uint32_t divisor;
+  int divisor_exponent;
   BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
-                  &divider, &divider_exponent);
-  *kappa = divider_exponent + 1;
+                  &divisor, &divisor_exponent);
+  *kappa = divisor_exponent + 1;
   *length = 0;
   // Loop invariant: buffer = too_high / 10^kappa  (integer division)
   // The invariant holds for the first iteration: kappa has been initialized
-  // with the divider exponent + 1. And the divider is the biggest power of ten
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
   // that is smaller than integrals.
   while (*kappa > 0) {
-    int digit = integrals / divider;
+    int digit = integrals / divisor;
     buffer[*length] = '0' + digit;
     (*length)++;
-    integrals %= divider;
+    integrals %= divisor;
     (*kappa)--;
-    // Note that kappa now equals the exponent of the divider and that the
+    // Note that kappa now equals the exponent of the divisor and that the
     // invariant thus holds again.
     uint64_t rest =
         (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
@@ -386,32 +445,24 @@
       // that lies within the unsafe interval.
       return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
                        unsafe_interval.f(), rest,
-                       static_cast<uint64_t>(divider) << -one.e(), unit);
+                       static_cast<uint64_t>(divisor) << -one.e(), unit);
     }
-    divider /= 10;
+    divisor /= 10;
   }
 
   // The integrals have been generated. We are at the point of the decimal
   // separator. In the following loop we simply multiply the remaining digits by
   // 10 and divide by one. We just need to pay attention to multiply associated
   // data (like the interval or 'unit'), too.
-  // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
-  // increase its (imaginary) exponent. At the same time we decrease the
-  // divider's (one's) exponent and shift its significand.
-  // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
-  //      fractionals.f *= 10;
-  //      fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
-  //      one.f >>= 1; one.e++;                 // value remains unchanged.
-  //      and we have again fractionals.e == one.e which allows us to divide
-  //           fractionals.f() by one.f()
-  // We simply combine the *= 10 and the >>= 1.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
   while (true) {
-    fractionals *= 5;
-    unit *= 5;
-    unsafe_interval.set_f(unsafe_interval.f() * 5);
-    unsafe_interval.set_e(unsafe_interval.e() + 1);  // Will be optimized out.
-    one.set_f(one.f() >> 1);
-    one.set_e(one.e() + 1);
+    fractionals *= 10;
+    unit *= 10;
+    unsafe_interval.set_f(unsafe_interval.f() * 10);
     // Integer division by one.
     int digit = static_cast<int>(fractionals >> -one.e());
     buffer[*length] = '0' + digit;
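
The rewritten loop above multiplies fractionals by 10 directly instead of the old multiply-by-5-and-shift trick; the new ASSERTs spell out why this cannot overflow. The bound also holds at compile time (a standalone restatement of the third ASSERT):

    #include <cstdint>

    // fractionals < one.f() <= 2^60 (because one.e() >= -60), so
    // fractionals * 10 < 10 * 2^60 < 2^64 and the multiplication is safe.
    static_assert(UINT64_MAX / 10 >= (static_cast<uint64_t>(1) << 60),
                  "10 * 2^60 fits in a uint64_t");

    int main() { return 0; }
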
@@ -426,6 +477,113 @@
 }
 
 
+
+// Generates (at most) requested_digits digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+//       Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+//  * w is correct up to 1 ulp (unit in the last place). That
+//    is, its error must be strictly less than a unit of its last digit.
+//  * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+//   otherwise:
+//     * buffer is not null-terminated, but length contains the number of
+//       digits.
+//     * the representation in buffer is the most precise representation of
+//       requested_digits digits.
+//     * buffer contains at most requested_digits digits of w. If there are
+//       fewer than requested_digits digits then some trailing '0's have been
+//       removed.
+//     * kappa is such that
+//            w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+//   numbers. If the precision is not enough to guarantee all the
+//   postconditions then false is returned. This happens rarely, but the
+//   failure rate increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+                            int requested_digits,
+                            Vector<char> buffer,
+                            int* length,
+                            int* kappa) {
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  ASSERT(kMinimalTargetExponent >= -60);
+  ASSERT(kMaximalTargetExponent <= -32);
+  // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+  // also scale its error.
+  uint64_t w_error = 1;
+  // We cut the input number into two parts: the integral digits and the
+  // fractional digits. We don't emit any decimal separator, but adapt kappa
+  // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+  // increase kappa by 1.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+  // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+  // Modulo by one is an and.
+  uint64_t fractionals = w.f() & (one.f() - 1);
+  uint32_t divisor;
+  int divisor_exponent;
+  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divisor, &divisor_exponent);
+  *kappa = divisor_exponent + 1;
+  *length = 0;
+
+  // Loop invariant: buffer = w / 10^kappa  (integer division)
+  // The invariant holds for the first iteration: kappa has been initialized
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
+  // that is smaller than 'integrals'.
+  while (*kappa > 0) {
+    int digit = integrals / divisor;
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    integrals %= divisor;
+    (*kappa)--;
+    // Note that kappa now equals the exponent of the divisor and that the
+    // invariant thus holds again.
+    if (requested_digits == 0) break;
+    divisor /= 10;
+  }
+
+  if (requested_digits == 0) {
+    uint64_t rest =
+        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+    return RoundWeedCounted(buffer, *length, rest,
+                            static_cast<uint64_t>(divisor) << -one.e(), w_error,
+                            kappa);
+  }
+
+  // The integrals have been generated. We are at the point of the decimal
+  // separator. In the following loop we simply multiply the remaining digits by
+  // 10 and divide by one. We just need to pay attention to multiply associated
+  // data (the 'unit'), too.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  while (requested_digits > 0 && fractionals > w_error) {
+    fractionals *= 10;
+    w_error *= 10;
+    // Integer division by one.
+    int digit = static_cast<int>(fractionals >> -one.e());
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    fractionals &= one.f() - 1;  // Modulo by one.
+    (*kappa)--;
+  }
+  if (requested_digits != 0) return false;
+  return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+                          kappa);
+}
+
+
 // Provides a decimal representation of v.
 // Returns true if it succeeds, otherwise the result cannot be trusted.
 // There will be *length digits inside the buffer (not null-terminated).
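
The postcondition w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2 deserves a concrete instance: for w ~ 3.14159 and requested_digits = 3, a conforming result is buffer = "314" with kappa = -2. A standalone check of that relationship (the digits are hypothetical, chosen for illustration):

    #include <cmath>
    #include <cstdio>

    int main() {
      double w = 3.14159;
      double buffer = 314.0;  // the digit string "314" read as an integer
      int kappa = -2;
      double eps = w - buffer * std::pow(10.0, kappa);
      // |eps| = 0.00159 < 10^kappa / 2 = 0.005, so the postcondition holds.
      std::printf("eps = %g, bound = %g\n", eps, std::pow(10.0, kappa) / 2);
    }
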
@@ -437,7 +595,10 @@
 // The last digit will be closest to the actual v. That is, even if several
 // digits might correctly yield 'v' when read again, the closest will be
 // computed.
-bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
+static bool Grisu3(double v,
+                   Vector<char> buffer,
+                   int* length,
+                   int* decimal_exponent) {
   DiyFp w = Double(v).AsNormalizedDiyFp();
   // boundary_minus and boundary_plus are the boundaries between v and its
   // closest floating-point neighbors. Any number strictly between
@@ -448,12 +609,12 @@
   ASSERT(boundary_plus.e() == w.e());
   DiyFp ten_mk;  // Cached power of ten: 10^-k
   int mk;        // -k
-  GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
-                 maximal_target_exponent, &mk, &ten_mk);
-  ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
-         DiyFp::kSignificandSize &&
-         maximal_target_exponent >= w.e() + ten_mk.e() +
-         DiyFp::kSignificandSize);
+  GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+                 kMaximalTargetExponent, &mk, &ten_mk);
+  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize) &&
+         (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize));
   // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
   // 64 bit significand and ten_mk is thus only precise up to 64 bits.
 
@@ -488,17 +649,75 @@
 }
 
 
+// The "counted" version of grisu3 (see above) generates at most
+// requested_digits digits. This version does not generate the shortest
+// representation, and with enough requested digits 0.1 will at some point
+// print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+                          int requested_digits,
+                          Vector<char> buffer,
+                          int* length,
+                          int* decimal_exponent) {
+  DiyFp w = Double(v).AsNormalizedDiyFp();
+  DiyFp ten_mk;  // Cached power of ten: 10^-k
+  int mk;        // -k
+  GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+                 kMaximalTargetExponent, &mk, &ten_mk);
+  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize) &&
+         (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize));
+  // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+  // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+  // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+  // off by a small amount.
+  // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+  // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+  //           (f-1) * 2^e < w*10^k < (f+1) * 2^e
+  DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+  // We now have (double) (scaled_w * 10^-mk).
+  // DigitGenCounted will generate the first requested_digits digits of
+  // scaled_w and return them together with a kappa such that
+  // scaled_w ~= buffer * 10^kappa. (It will not always be exactly the same
+  // since DigitGenCounted only produces a limited number of digits.)
+  int kappa;
+  bool result = DigitGenCounted(scaled_w, requested_digits,
+                                buffer, length, &kappa);
+  *decimal_exponent = -mk + kappa;
+  return result;
+}
+
+
 bool FastDtoa(double v,
+              FastDtoaMode mode,
+              int requested_digits,
               Vector<char> buffer,
               int* length,
-              int* point) {
+              int* decimal_point) {
   ASSERT(v > 0);
   ASSERT(!Double(v).IsSpecial());
 
-  int decimal_exponent;
-  bool result = grisu3(v, buffer, length, &decimal_exponent);
-  *point = *length + decimal_exponent;
-  buffer[*length] = '\0';
+  bool result = false;
+  int decimal_exponent = 0;
+  switch (mode) {
+    case FAST_DTOA_SHORTEST:
+      result = Grisu3(v, buffer, length, &decimal_exponent);
+      break;
+    case FAST_DTOA_PRECISION:
+      result = Grisu3Counted(v, requested_digits,
+                             buffer, length, &decimal_exponent);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  if (result) {
+    *decimal_point = *length + decimal_exponent;
+    buffer[*length] = '\0';
+  }
   return result;
 }
 
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
index 4403a75..94c22ec 100644
--- a/src/fast-dtoa.h
+++ b/src/fast-dtoa.h
@@ -31,27 +31,52 @@
 namespace v8 {
 namespace internal {
 
+enum FastDtoaMode {
+  // Computes the shortest representation of the given input. The returned
+  // result will be the most accurate number of this length. Longer
+  // representations might be more accurate.
+  FAST_DTOA_SHORTEST,
+  // Computes a representation where the precision (number of digits) is
+  // given as input. The precision is independent of the decimal point.
+  FAST_DTOA_PRECISION
+};
+
 // FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
 // include the terminating '\0' character.
 static const int kFastDtoaMaximalLength = 17;
 
 // Provides a decimal representation of v.
-// v must be a strictly positive finite double.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+//   * v must be a strictly positive finite double.
+//
 // Returns true if it succeeds, otherwise the result can not be trusted.
 // There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true then
-//   v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible: no
-// 0.099999999999 instead of 0.1.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain the
-// one closest to v.
-// The variable 'sign' will be '0' if the given number is positive, and '1'
-//   otherwise.
+// If the function returns true and mode equals
+//   - FAST_DTOA_SHORTEST, then
+//     the parameter requested_digits is ignored.
+//     The result satisfies
+//         v == (double) (buffer * 10^(point - length)).
+//     The digits in the buffer are the shortest representation possible. E.g.
+//     if 0.099999999999 and 0.1 represent the same double then "1" is returned
+//     with point = 0.
+//     The last digit will be closest to the actual v. That is, even if several
+//     digits might correctly yield 'v' when read again, the buffer will contain
+//     the one closest to v.
+//   - FAST_DTOA_PRECISION, then
+//     the buffer contains requested_digits digits.
+//     the difference v - (buffer * 10^(point-length)) is closest to zero for
+//     all possible representations of requested_digits digits.
+//     If there are two values that are equally close, then FastDtoa returns
+//     false.
+// For both modes the buffer must be large enough to hold the result.
 bool FastDtoa(double d,
+              FastDtoaMode mode,
+              int requested_digits,
               Vector<char> buffer,
               int* length,
-              int* point);
+              int* decimal_point);
 
 } }  // namespace v8::internal
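
A quick decode of the documented encoding, result = buffer * 10^(point - length), on a hypothetical output (digits chosen for illustration, not produced by running the patch): buffer = "5", length = 1, point = 0 encodes 0.5.

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const char* buffer = "5";  // hypothetical FastDtoa output for v = 0.5
      int length = static_cast<int>(std::strlen(buffer));
      int point = 0;
      double value = std::atof(buffer) * std::pow(10.0, point - length);
      std::printf("%g\n", value);  // 0.5
    }
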
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a63088d..263a2a4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -174,6 +174,10 @@
 DEFINE_int(max_stack_trace_source_length, 300,
            "maximum length of function source code printed in a stack trace.")
 
+// full-codegen.cc
+DEFINE_bool(always_inline_smi_code, false,
+            "always inline smi code in non-opt code")
+
 // heap.cc
 DEFINE_int(max_new_space_size, 0, "max size of the new generation")
 DEFINE_int(max_old_space_size, 0, "max size of the old generation")
diff --git a/src/frames.cc b/src/frames.cc
index 76a441b..3cdb015 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -143,8 +143,8 @@
     state.pc_address =
         reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
     type = StackFrame::ComputeType(&state);
-    if (SingletonFor(type) == NULL) return;
   }
+  if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
 }
 
@@ -203,13 +203,24 @@
 // -------------------------------------------------------------------------
 
 
+bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
+  if (!validator_.IsValid(fp)) return false;
+  Address sp = ExitFrame::ComputeStackPointer(fp);
+  if (!validator_.IsValid(sp)) return false;
+  StackFrame::State state;
+  ExitFrame::FillState(fp, sp, &state);
+  if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
+    return false;
+  }
+  return *state.pc_address != NULL;
+}
+
+
 SafeStackFrameIterator::SafeStackFrameIterator(
     Address fp, Address sp, Address low_bound, Address high_bound) :
-    maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
-    is_valid_top_(
-        IsWithinBounds(low_bound, high_bound,
-                       Top::c_entry_fp(Top::GetCurrentThread())) &&
-        Top::handler(Top::GetCurrentThread()) != NULL),
+    maintainer_(),
+    stack_validator_(low_bound, high_bound),
+    is_valid_top_(IsValidTop(low_bound, high_bound)),
     is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
     is_working_iterator_(is_valid_top_ || is_valid_fp_),
     iteration_done_(!is_working_iterator_),
@@ -217,6 +228,14 @@
 }
 
 
+bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
+  Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+  ExitFrameValidator validator(low_bound, high_bound);
+  if (!validator.IsValidFP(fp)) return false;
+  return Top::handler(Top::GetCurrentThread()) != NULL;
+}
+
+
 void SafeStackFrameIterator::Advance() {
   ASSERT(is_working_iterator_);
   ASSERT(!done());
@@ -258,9 +277,8 @@
     // sure that caller FP address is valid.
     Address caller_fp = Memory::Address_at(
         frame->fp() + EntryFrameConstants::kCallerFPOffset);
-    if (!IsValidStackAddress(caller_fp)) {
-      return false;
-    }
+    ExitFrameValidator validator(stack_validator_);
+    if (!validator.IsValidFP(caller_fp)) return false;
   } else if (frame->is_arguments_adaptor()) {
     // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
     // the number of arguments is stored on stack as Smi. We need to check
@@ -415,6 +433,22 @@
 }
 
 
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  Address sp = ComputeStackPointer(fp);
+  FillState(fp, sp, state);
+  ASSERT(*state->pc_address != NULL);
+  return EXIT;
+}
+
+
+void ExitFrame::FillState(Address fp, Address sp, State* state) {
+  state->sp = sp;
+  state->fp = fp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+}
+
+
 Address StandardFrame::GetExpressionAddress(int n) const {
   const int offset = StandardFrameConstants::kExpressionsOffset;
   return fp() + offset - n * kPointerSize;
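
Net effect of the frames changes: before the sampler trusts a saved frame pointer, every address it derives (the fp itself, the computed sp, and the slot holding the saved pc) must lie within the sampled stack's bounds, and the saved pc must be non-null. A standalone model of that validation chain (the bounds comparison and frame layout here are assumptions for illustration, not lifted from the patch):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t Address;

    static bool IsWithinBounds(Address low, Address high, Address addr) {
      return low <= addr && addr < high;  // assumed half-open interval
    }

    struct ExitFrameValidatorModel {
      Address low, high;
      bool IsValidFP(Address fp) const {
        if (!IsWithinBounds(low, high, fp)) return false;
        Address sp = fp - 2 * sizeof(Address);   // hypothetical sp computation
        if (!IsWithinBounds(low, high, sp)) return false;
        Address pc_slot = sp - sizeof(Address);  // pc saved just below sp
        return IsWithinBounds(low, high, pc_slot);
      }
    };

    int main() {
      ExitFrameValidatorModel v = {0x1000, 0x2000};
      std::printf("%d %d\n", v.IsValidFP(0x1800), v.IsValidFP(0x3000));  // 1 0
    }
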
diff --git a/src/frames.h b/src/frames.h
index 2011190..2d4f338 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -67,7 +67,7 @@
   static PcToCodeCacheEntry* GetCacheEntry(Address pc);
 
  private:
-  static const int kPcToCodeCacheSize = 256;
+  static const int kPcToCodeCacheSize = 1024;
   static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
 };
 
@@ -141,6 +141,13 @@
     NO_ID = 0
   };
 
+  struct State {
+    State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+    Address sp;
+    Address fp;
+    Address* pc_address;
+  };
+
   // Copy constructor; it breaks the connection to host iterator.
   StackFrame(const StackFrame& original) {
     this->state_ = original.state_;
@@ -201,12 +208,6 @@
                      int index) const { }
 
  protected:
-  struct State {
-    Address sp;
-    Address fp;
-    Address* pc_address;
-  };
-
   explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
   virtual ~StackFrame() { }
 
@@ -318,6 +319,8 @@
   // pointer. Used when constructing the first stack frame seen by an
   // iterator and the frames following entry frames.
   static Type GetStateForFramePointer(Address fp, State* state);
+  static Address ComputeStackPointer(Address fp);
+  static void FillState(Address fp, Address sp, State* state);
 
  protected:
   explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@@ -443,6 +446,7 @@
   inline Object* function_slot_object() const;
 
   friend class StackFrameIterator;
+  friend class StackTracer;
 };
 
 
@@ -654,12 +658,36 @@
   }
 
  private:
+  class StackAddressValidator {
+   public:
+    StackAddressValidator(Address low_bound, Address high_bound)
+        : low_bound_(low_bound), high_bound_(high_bound) { }
+    bool IsValid(Address addr) const {
+      return IsWithinBounds(low_bound_, high_bound_, addr);
+    }
+   private:
+    Address low_bound_;
+    Address high_bound_;
+  };
+
+  class ExitFrameValidator {
+   public:
+    explicit ExitFrameValidator(const StackAddressValidator& validator)
+        : validator_(validator) { }
+    ExitFrameValidator(Address low_bound, Address high_bound)
+        : validator_(low_bound, high_bound) { }
+    bool IsValidFP(Address fp);
+   private:
+    StackAddressValidator validator_;
+  };
+
   bool IsValidStackAddress(Address addr) const {
-    return IsWithinBounds(low_bound_, high_bound_, addr);
+    return stack_validator_.IsValid(addr);
   }
   bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
   bool IsValidFrame(StackFrame* frame) const;
   bool IsValidCaller(StackFrame* frame);
+  static bool IsValidTop(Address low_bound, Address high_bound);
 
   // This is a nasty hack to make sure the active count is incremented
   // before the constructor for the embedded iterator is invoked. This
@@ -674,8 +702,7 @@
 
   ActiveCountMaintainer maintainer_;
   static int active_count_;
-  Address low_bound_;
-  Address high_bound_;
+  StackAddressValidator stack_validator_;
   const bool is_valid_top_;
   const bool is_valid_fp_;
   const bool is_working_iterator_;
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 7de4a00..fa835cb 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -324,42 +324,101 @@
 
 
 bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
-  // TODO(kasperl): Once the compare stub allows leaving out the
-  // inlined smi case, we should get rid of this check.
-  if (Token::IsCompareOp(op)) return true;
-  // TODO(kasperl): Once the unary bit not stub allows leaving out
-  // the inlined smi case, we should get rid of this check.
-  if (op == Token::BIT_NOT) return true;
   // Inline smi case inside loops, but not division and modulo which
   // are too complicated and take up too much space.
-  return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
+  if (op == Token::DIV || op == Token::MOD) return false;
+  if (FLAG_always_inline_smi_code) return true;
+  return loop_depth_ > 0;
 }
 
 
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
-                                    Label* materialize_false,
-                                    Label** if_true,
-                                    Label** if_false,
-                                    Label** fall_through) {
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      // In an effect context, the true and the false case branch to the
-      // same label.
-      *if_true = *if_false = *fall_through = materialize_true;
-      break;
-    case Expression::kValue:
-      *if_true = *fall_through = materialize_true;
-      *if_false = materialize_false;
-      break;
-    case Expression::kTest:
-      *if_true = true_label_;
-      *if_false = false_label_;
-      *fall_through = fall_through_;
-      break;
-  }
+void FullCodeGenerator::EffectContext::Plug(Register reg) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
+  // Move value into place.
+  __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
+  // Move value into place.
+  __ push(reg);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Register reg) const {
+  // For simplicity we always test the accumulator register.
+  __ Move(result_register(), reg);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::PlugTOS() const {
+  __ Drop(1);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
+  __ pop(result_register());
+}
+
+
+void FullCodeGenerator::StackValueContext::PlugTOS() const {
+}
+
+
+void FullCodeGenerator::TestContext::PlugTOS() const {
+  // For simplicity we always test the accumulator register.
+  __ pop(result_register());
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::PrepareTest(
+    Label* materialize_true,
+    Label* materialize_false,
+    Label** if_true,
+    Label** if_false,
+    Label** fall_through) const {
+  // In an effect context, the true and the false case branch to the
+  // same label.
+  *if_true = *if_false = *fall_through = materialize_true;
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::PrepareTest(
+    Label* materialize_true,
+    Label* materialize_false,
+    Label** if_true,
+    Label** if_false,
+    Label** fall_through) const {
+  *if_true = *fall_through = materialize_true;
+  *if_false = materialize_false;
+}
+
+
+void FullCodeGenerator::StackValueContext::PrepareTest(
+    Label* materialize_true,
+    Label* materialize_false,
+    Label** if_true,
+    Label** if_false,
+    Label** fall_through) const {
+  *if_true = *fall_through = materialize_true;
+  *if_false = materialize_false;
+}
+
+
+void FullCodeGenerator::TestContext::PrepareTest(
+    Label* materialize_true,
+    Label* materialize_false,
+    Label** if_true,
+    Label** if_false,
+    Label** fall_through) const {
+  *if_true = true_label_;
+  *if_false = false_label_;
+  *fall_through = fall_through_;
 }
 
 
@@ -370,7 +429,7 @@
   for (int i = 0; i < length; i++) {
     Declaration* decl = declarations->at(i);
     Variable* var = decl->proxy()->var();
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
 
     // If it was not possible to allocate the variable at compile
     // time, we need to "declare" it at runtime to make sure it
@@ -390,7 +449,7 @@
     for (int j = 0, i = 0; i < length; i++) {
       Declaration* decl = declarations->at(i);
       Variable* var = decl->proxy()->var();
-      Slot* slot = var->slot();
+      Slot* slot = var->AsSlot();
 
       if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
         array->set(j++, *(var->name()));
@@ -505,18 +564,36 @@
 }
 
 
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
-  Handle<String> name = expr->name();
-  SmartPointer<char> cstring = name->ToCString();
+// Lookup table for code generators for special runtime calls which are
+// generated inline.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)          \
+    &FullCodeGenerator::Emit##Name,
 
-#define CHECK_EMIT_INLINE_CALL(name, x, y) \
-  if (strcmp("_"#name, *cstring) == 0) {   \
-    Emit##name(expr->arguments());         \
-    return;                                \
-  }
-  INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
-#undef CHECK_EMIT_INLINE_CALL
-  UNREACHABLE();
+const FullCodeGenerator::InlineFunctionGenerator
+  FullCodeGenerator::kInlineFunctionGenerators[] = {
+    INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+    INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+  };
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+
+
+FullCodeGenerator::InlineFunctionGenerator
+  FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
+    return kInlineFunctionGenerators[
+      static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
+}
+
+
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
+  ZoneList<Expression*>* args = node->arguments();
+  Handle<String> name = node->name();
+  Runtime::Function* function = node->function();
+  ASSERT(function != NULL);
+  ASSERT(function->intrinsic_type == Runtime::INLINE);
+  InlineFunctionGenerator generator =
+      FindInlineFunctionGenerator(function->function_id);
+  ASSERT(generator != NULL);
+  ((*this).*(generator))(args);
 }
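
The strcmp chain over "_Name" strings is gone; dispatch is now a dense table of member-function pointers indexed by Runtime::FunctionId. A minimal standalone model of that dispatch shape (class, ids, and generators are hypothetical):

    #include <cstdio>

    class Gen {
     public:
      typedef void (Gen::*Generator)();
      void EmitIsSmi() { std::printf("IsSmi\n"); }
      void EmitIsArray() { std::printf("IsArray\n"); }
      void Dispatch(int id) {
        // Table index derived from a dense id, as in FindInlineFunctionGenerator.
        static const Generator kTable[] = { &Gen::EmitIsSmi, &Gen::EmitIsArray };
        (this->*kTable[id])();
      }
    };

    int main() {
      Gen g;
      g.Dispatch(1);  // prints: IsArray
    }
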
 
 
@@ -562,20 +639,20 @@
 
       // Load only the operands that we need to materialize.
       if (constant == kNoConstants) {
-        VisitForValue(left, kStack);
-        VisitForValue(right, kAccumulator);
+        VisitForStackValue(left);
+        VisitForAccumulatorValue(right);
       } else if (constant == kRightConstant) {
-        VisitForValue(left, kAccumulator);
+        VisitForAccumulatorValue(left);
       } else {
         ASSERT(constant == kLeftConstant);
-        VisitForValue(right, kAccumulator);
+        VisitForAccumulatorValue(right);
       }
 
       SetSourcePosition(expr->position());
       if (ShouldInlineSmiCase(op)) {
-        EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+        EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant);
       } else {
-        EmitBinaryOp(op, context_, mode);
+        EmitBinaryOp(op, mode);
       }
       break;
     }
@@ -589,39 +666,7 @@
 void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
   Label eval_right, done;
 
-  // Set up the appropriate context for the left subexpression based
-  // on the operation and our own context.  Initially assume we can
-  // inherit both true and false labels from our context.
-  if (expr->op() == Token::OR) {
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        VisitForControl(expr->left(), &done, &eval_right, &eval_right);
-        break;
-      case Expression::kValue:
-        VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
-        break;
-      case Expression::kTest:
-        VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
-        break;
-    }
-  } else {
-    ASSERT_EQ(Token::AND, expr->op());
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        VisitForControl(expr->left(), &eval_right, &done, &eval_right);
-        break;
-      case Expression::kValue:
-        VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
-        break;
-      case Expression::kTest:
-        VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
-        break;
-    }
-  }
+  context()->EmitLogicalLeft(expr, &eval_right, &done);
 
   __ bind(&eval_right);
   Visit(expr->right());
@@ -630,43 +675,75 @@
 }
 
 
-void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
-                                             Token::Value op,
-                                             Location where,
-                                             Label* done) {
-  ASSERT(op == Token::AND || op == Token::OR);
-  VisitForValue(expr, kAccumulator);
-  __ push(result_register());
-
-  Label discard;
-  switch (where) {
-    case kAccumulator: {
-      Label restore;
-      if (op == Token::OR) {
-        DoTest(&restore, &discard, &restore);
-      } else {
-        DoTest(&discard, &restore, &restore);
-      }
-      __ bind(&restore);
-      __ pop(result_register());
-      __ jmp(done);
-      break;
-    }
-    case kStack: {
-      if (op == Token::OR) {
-        DoTest(done, &discard, &discard);
-      } else {
-        DoTest(&discard, done, &discard);
-      }
-      break;
-    }
+void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
+                                                       Label* eval_right,
+                                                       Label* done) const {
+  if (expr->op() == Token::OR) {
+    codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
+  } else {
+    ASSERT(expr->op() == Token::AND);
+    codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
   }
+}
 
+
+void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
+    BinaryOperation* expr,
+    Label* eval_right,
+    Label* done) const {
+  codegen()->Visit(expr->left());
+  // We want the value in the accumulator for the test, and on the stack in case
+  // we need it.
+  __ push(result_register());
+  Label discard, restore;
+  if (expr->op() == Token::OR) {
+    codegen()->DoTest(&restore, &discard, &restore);
+  } else {
+    ASSERT(expr->op() == Token::AND);
+    codegen()->DoTest(&discard, &restore, &restore);
+  }
+  __ bind(&restore);
+  __ pop(result_register());
+  __ jmp(done);
   __ bind(&discard);
   __ Drop(1);
 }
 
 
+void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
+    BinaryOperation* expr,
+    Label* eval_right,
+    Label* done) const {
+  codegen()->VisitForAccumulatorValue(expr->left());
+  // We want the value in the accumulator for the test, and on the stack in case
+  // we need it.
+  __ push(result_register());
+  Label discard;
+  if (expr->op() == Token::OR) {
+    codegen()->DoTest(done, &discard, &discard);
+  } else {
+    ASSERT(expr->op() == Token::AND);
+    codegen()->DoTest(&discard, done, &discard);
+  }
+  __ bind(&discard);
+  __ Drop(1);
+}
+
+
+void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
+                                                     Label* eval_right,
+                                                     Label* done) const {
+  if (expr->op() == Token::OR) {
+    codegen()->VisitForControl(expr->left(),
+                               true_label_, eval_right, eval_right);
+  } else {
+    ASSERT(expr->op() == Token::AND);
+    codegen()->VisitForControl(expr->left(),
+                               eval_right, false_label_, eval_right);
+  }
+}
+
+
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   Breakable nested_statement(this, stmt);
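
For reference, the observable semantics all four EmitLogicalLeft variants preserve for `a || b`: the left value is the result when it is truthy, otherwise the right operand supplies it. A standalone analogue with integers standing in for JS values (short-circuit evaluation of the right operand is elided here):

    #include <cstdio>

    static int LogicalOr(int a, int b) { return a != 0 ? a : b; }

    int main() {
      std::printf("%d %d\n", LogicalOr(3, 7), LogicalOr(0, 7));  // 3 7
    }
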
@@ -747,7 +824,7 @@
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
   Expression* expr = stmt->expression();
-  VisitForValue(expr, kAccumulator);
+  VisitForAccumulatorValue(expr);
 
   // Exit all nested statements.
   NestedStatement* current = nesting_stack_;
@@ -766,7 +843,7 @@
   Comment cmnt(masm_, "[ WithEnterStatement");
   SetStatementPosition(stmt);
 
-  VisitForValue(stmt->expression(), kStack);
+  VisitForStackValue(stmt->expression());
   if (stmt->is_catch_block()) {
     __ CallRuntime(Runtime::kPushCatchContext, 1);
   } else {
@@ -941,7 +1018,7 @@
     // The catch variable is *always* a variable proxy for a local variable.
     Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
     ASSERT_NOT_NULL(catch_var);
-    Slot* variable_slot = catch_var->slot();
+    Slot* variable_slot = catch_var->AsSlot();
     ASSERT_NOT_NULL(variable_slot);
     ASSERT_EQ(Slot::LOCAL, variable_slot->type());
     StoreToFrameField(SlotOffset(variable_slot), result_register());
@@ -1047,7 +1124,7 @@
                         expr->then_expression_position());
   Visit(expr->then_expression());
   // If control flow falls through Visit, jump to done.
-  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+  if (!context()->IsTest()) {
     __ jmp(&done);
   }
 
@@ -1056,7 +1133,7 @@
                         expr->else_expression_position());
   Visit(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
-  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+  if (!context()->IsTest()) {
     __ bind(&done);
   }
 }
@@ -1070,7 +1147,7 @@
 
 void FullCodeGenerator::VisitLiteral(Literal* expr) {
   Comment cmnt(masm_, "[ Literal");
-  Apply(context_, expr);
+  context()->Plug(expr->handle());
 }
 
 
@@ -1096,17 +1173,17 @@
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
   Comment cmnt(masm_, "[ CatchExtensionObject");
-  VisitForValue(expr->key(), kStack);
-  VisitForValue(expr->value(), kStack);
+  VisitForStackValue(expr->key());
+  VisitForStackValue(expr->value());
   // Create catch extension object.
   __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  Apply(context_, result_register());
+  context()->Plug(result_register());
 }
 
 
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
-  VisitForValue(expr->exception(), kStack);
+  VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
   // Never returns here.
 }
@@ -1136,9 +1213,9 @@
 
 void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kRegExpCloneResult, 1);
-  Apply(context_, result_register());
+  context()->Plug(result_register());
 }
 
 #undef __
diff --git a/src/full-codegen.h b/src/full-codegen.h
index ab0fd36..03024e1 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -71,10 +71,7 @@
         info_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        location_(kStack),
-        true_label_(NULL),
-        false_label_(NULL),
-        fall_through_(NULL) {
+        context_(NULL) {
   }
 
   static Handle<Code> MakeCode(CompilationInfo* info);
@@ -232,17 +229,18 @@
     DISALLOW_COPY_AND_ASSIGN(ForIn);
   };
 
-  enum Location {
-    kAccumulator,
-    kStack
-  };
-
   enum ConstantOperand {
     kNoConstants,
     kLeftConstant,
     kRightConstant
   };
 
+  // Type of a member function that generates inline code for a native function.
+  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
   // Compute the frame pointer relative offset for a given local or
   // parameter slot.
   int SlotOffset(Slot* slot);
@@ -256,39 +254,6 @@
                                      Expression* left,
                                      Expression* right);
 
-  // Emit code to convert a pure value (in a register, slot, as a literal,
-  // or on top of the stack) into the result expected according to an
-  // expression context.
-  void Apply(Expression::Context context, Register reg);
-
-  // Slot cannot have type Slot::LOOKUP.
-  void Apply(Expression::Context context, Slot* slot);
-
-  void Apply(Expression::Context context, Literal* lit);
-  void ApplyTOS(Expression::Context context);
-
-  // Emit code to discard count elements from the top of stack, then convert
-  // a pure value into the result expected according to an expression
-  // context.
-  void DropAndApply(int count, Expression::Context context, Register reg);
-
-  // Set up branch labels for a test expression.
-  void PrepareTest(Label* materialize_true,
-                   Label* materialize_false,
-                   Label** if_true,
-                   Label** if_false,
-                   Label** fall_through);
-
-  // Emit code to convert pure control flow to a pair of labels into the
-  // result expected according to an expression context.
-  void Apply(Expression::Context context,
-             Label* materialize_true,
-             Label* materialize_false);
-
-  // Emit code to convert constant control flow (true or false) into
-  // the result expected according to an expression context.
-  void Apply(Expression::Context context, bool flag);
-
   // Helper function to convert a pure value into a test context.  The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
@@ -310,39 +275,26 @@
   MemOperand EmitSlotSearch(Slot* slot, Register scratch);
 
   void VisitForEffect(Expression* expr) {
-    Expression::Context saved_context = context_;
-    context_ = Expression::kEffect;
+    EffectContext context(this);
     Visit(expr);
-    context_ = saved_context;
   }
 
-  void VisitForValue(Expression* expr, Location where) {
-    Expression::Context saved_context = context_;
-    Location saved_location = location_;
-    context_ = Expression::kValue;
-    location_ = where;
+  void VisitForAccumulatorValue(Expression* expr) {
+    AccumulatorValueContext context(this);
     Visit(expr);
-    context_ = saved_context;
-    location_ = saved_location;
+  }
+
+  void VisitForStackValue(Expression* expr) {
+    StackValueContext context(this);
+    Visit(expr);
   }
 
   void VisitForControl(Expression* expr,
                        Label* if_true,
                        Label* if_false,
                        Label* fall_through) {
-    Expression::Context saved_context = context_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    Label* saved_fall_through = fall_through_;
-    context_ = Expression::kTest;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    fall_through_ = fall_through;
+    TestContext context(this, if_true, if_false, fall_through);
     Visit(expr);
-    context_ = saved_context;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-    fall_through_ = saved_fall_through;
   }
 
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
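
The enum-plus-manual-save/restore scheme is replaced throughout by RAII: each VisitFor* helper above constructs a scoped context whose constructor installs it on the code generator and whose destructor reinstates the previous one, so nesting and early exits are handled automatically. A minimal standalone model of the pattern (names shortened; not the V8 classes):

    #include <cstdio>

    class Codegen;

    class Context {
     public:
      explicit Context(Codegen* cg);
      virtual ~Context();
      virtual const char* name() const = 0;
     private:
      Codegen* cg_;
      const Context* old_;
    };

    class Codegen {
     public:
      Codegen() : context_(NULL) {}
      const Context* context() const { return context_; }
      void set_context(const Context* c) { context_ = c; }
      void Visit() { std::printf("visiting in %s context\n", context()->name()); }
     private:
      const Context* context_;
    };

    Context::Context(Codegen* cg) : cg_(cg), old_(cg->context()) {
      cg->set_context(this);  // install on construction
    }

    Context::~Context() {
      cg_->set_context(old_);  // restore on scope exit
    }

    class AccumulatorValue : public Context {
     public:
      explicit AccumulatorValue(Codegen* cg) : Context(cg) {}
      virtual const char* name() const { return "accumulator"; }
    };

    int main() {
      Codegen cg;
      AccumulatorValue context(&cg);
      cg.Visit();  // prints: visiting in accumulator context
    }
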
@@ -373,10 +325,13 @@
   void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
 
   // Platform-specific code for inline runtime calls.
+  InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
+
   void EmitInlineRuntimeCall(CallRuntime* expr);
 
 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
   void Emit##name(ZoneList<Expression*>* arguments);
+  INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
   INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
 #undef EMIT_INLINE_RUNTIME_CALL
 
@@ -389,7 +344,7 @@
                                        TypeofState typeof_state,
                                        Label* slow,
                                        Label* done);
-  void EmitVariableLoad(Variable* expr, Expression::Context context);
+  void EmitVariableLoad(Variable* expr);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -408,14 +363,12 @@
   // Apply the compound assignment operator. Expects the left operand on top
   // of the stack and the right one in the accumulator.
   void EmitBinaryOp(Token::Value op,
-                    Expression::Context context,
                     OverwriteMode mode);
 
   // Helper functions for generating inlined smi code for certain
   // binary operations.
   void EmitInlineSmiBinaryOp(Expression* expr,
                              Token::Value op,
-                             Expression::Context context,
                              OverwriteMode mode,
                              Expression* left,
                              Expression* right,
@@ -423,31 +376,26 @@
 
   void EmitConstantSmiBinaryOp(Expression* expr,
                                Token::Value op,
-                               Expression::Context context,
                                OverwriteMode mode,
                                bool left_is_constant_smi,
                                Smi* value);
 
   void EmitConstantSmiBitOp(Expression* expr,
                             Token::Value op,
-                            Expression::Context context,
                             OverwriteMode mode,
                             Smi* value);
 
   void EmitConstantSmiShiftOp(Expression* expr,
                               Token::Value op,
-                              Expression::Context context,
                               OverwriteMode mode,
                               Smi* value);
 
   void EmitConstantSmiAdd(Expression* expr,
-                          Expression::Context context,
                           OverwriteMode mode,
                           bool left_is_constant_smi,
                           Smi* value);
 
   void EmitConstantSmiSub(Expression* expr,
-                          Expression::Context context,
                           OverwriteMode mode,
                           bool left_is_constant_smi,
                           Smi* value);
@@ -459,8 +407,7 @@
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
   void EmitVariableAssignment(Variable* var,
-                              Token::Value op,
-                              Expression::Context context);
+                              Token::Value op);
 
   // Complete a named property assignment.  The receiver is expected on top
   // of the stack and the right-hand-side value in the accumulator.
@@ -492,6 +439,10 @@
 
   MacroAssembler* masm() { return masm_; }
 
+  class ExpressionContext;
+  const ExpressionContext* context() { return context_; }
+  void set_new_context(const ExpressionContext* context) { context_ = context; }
+
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   FunctionLiteral* function() { return info_->function(); }
@@ -500,6 +451,9 @@
   static Register result_register();
   static Register context_register();
 
+  // Helper for calling an IC stub.
+  void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
@@ -518,13 +472,7 @@
   // Handles the shortcutted logical binary operations in VisitBinaryOperation.
   void EmitLogicalOperation(BinaryOperation* expr);
 
-  void VisitForTypeofValue(Expression* expr, Location where);
-
-  void VisitLogicalForValue(Expression* expr,
-                            Token::Value op,
-                            Location where,
-                            Label* done);
-
+  void VisitForTypeofValue(Expression* expr);
 
   MacroAssembler* masm_;
   CompilationInfo* info_;
@@ -533,11 +481,178 @@
   NestedStatement* nesting_stack_;
   int loop_depth_;
 
-  Expression::Context context_;
-  Location location_;
-  Label* true_label_;
-  Label* false_label_;
-  Label* fall_through_;
+  class ExpressionContext {
+   public:
+    explicit ExpressionContext(FullCodeGenerator* codegen)
+        : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
+      codegen->set_new_context(this);
+    }
+
+    virtual ~ExpressionContext() {
+      codegen_->set_new_context(old_);
+    }
+
+    // Convert constant control flow (true or false) to the result expected for
+    // this expression context.
+    virtual void Plug(bool flag) const = 0;
+
+    // Emit code to convert a pure value (in a register, slot, as a literal,
+    // or on top of the stack) into the result expected according to this
+    // expression context.
+    virtual void Plug(Register reg) const = 0;
+    virtual void Plug(Slot* slot) const = 0;
+    virtual void Plug(Handle<Object> lit) const = 0;
+    virtual void Plug(Heap::RootListIndex index) const = 0;
+    virtual void PlugTOS() const = 0;
+
+    // Emit code to convert pure control flow, given as a pair of unbound
+    // labels, into the result expected according to this expression context.
+    // The implementation may decide to bind either of the labels.
+    virtual void Plug(Label* materialize_true,
+                      Label* materialize_false) const = 0;
+
+    // Emit code to discard count elements from the top of stack, then convert
+    // a pure value into the result expected according to this expression
+    // context.
+    virtual void DropAndPlug(int count, Register reg) const = 0;
+
+    // For shortcutting operations || and &&.
+    virtual void EmitLogicalLeft(BinaryOperation* expr,
+                                 Label* eval_right,
+                                 Label* done) const = 0;
+
+    // Set up branch labels for a test expression.  The three Label** parameters
+    // are output parameters.
+    virtual void PrepareTest(Label* materialize_true,
+                             Label* materialize_false,
+                             Label** if_true,
+                             Label** if_false,
+                             Label** fall_through) const = 0;
+
+    // Returns true if we are evaluating only for side effects (i.e., if the
+    // result will be discarded).
+    virtual bool IsEffect() const { return false; }
+
+    // Returns true if we are branching on the value rather than materializing
+    // it.
+    virtual bool IsTest() const { return false; }
+
+   protected:
+    FullCodeGenerator* codegen() const { return codegen_; }
+    MacroAssembler* masm() const { return masm_; }
+    MacroAssembler* masm_;
+
+   private:
+    const ExpressionContext* old_;
+    FullCodeGenerator* codegen_;
+  };
+
+  class AccumulatorValueContext : public ExpressionContext {
+   public:
+    explicit AccumulatorValueContext(FullCodeGenerator* codegen)
+        : ExpressionContext(codegen) { }
+
+    virtual void Plug(bool flag) const;
+    virtual void Plug(Register reg) const;
+    virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Handle<Object> lit) const;
+    virtual void Plug(Heap::RootListIndex) const;
+    virtual void PlugTOS() const;
+    virtual void DropAndPlug(int count, Register reg) const;
+    virtual void EmitLogicalLeft(BinaryOperation* expr,
+                                 Label* eval_right,
+                                 Label* done) const;
+    virtual void PrepareTest(Label* materialize_true,
+                             Label* materialize_false,
+                             Label** if_true,
+                             Label** if_false,
+                             Label** fall_through) const;
+  };
+
+  class StackValueContext : public ExpressionContext {
+   public:
+    explicit StackValueContext(FullCodeGenerator* codegen)
+        : ExpressionContext(codegen) { }
+
+    virtual void Plug(bool flag) const;
+    virtual void Plug(Register reg) const;
+    virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Handle<Object> lit) const;
+    virtual void Plug(Heap::RootListIndex) const;
+    virtual void PlugTOS() const;
+    virtual void DropAndPlug(int count, Register reg) const;
+    virtual void EmitLogicalLeft(BinaryOperation* expr,
+                                 Label* eval_right,
+                                 Label* done) const;
+    virtual void PrepareTest(Label* materialize_true,
+                             Label* materialize_false,
+                             Label** if_true,
+                             Label** if_false,
+                             Label** fall_through) const;
+  };
+
+  class TestContext : public ExpressionContext {
+   public:
+    explicit TestContext(FullCodeGenerator* codegen,
+                         Label* true_label,
+                         Label* false_label,
+                         Label* fall_through)
+        : ExpressionContext(codegen),
+          true_label_(true_label),
+          false_label_(false_label),
+          fall_through_(fall_through) { }
+
+    virtual void Plug(bool flag) const;
+    virtual void Plug(Register reg) const;
+    virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Handle<Object> lit) const;
+    virtual void Plug(Heap::RootListIndex) const;
+    virtual void PlugTOS() const;
+    virtual void DropAndPlug(int count, Register reg) const;
+    virtual void EmitLogicalLeft(BinaryOperation* expr,
+                                 Label* eval_right,
+                                 Label* done) const;
+    virtual void PrepareTest(Label* materialize_true,
+                             Label* materialize_false,
+                             Label** if_true,
+                             Label** if_false,
+                             Label** fall_through) const;
+    virtual bool IsTest() const { return true; }
+
+   private:
+    Label* true_label_;
+    Label* false_label_;
+    Label* fall_through_;
+  };
+
+  class EffectContext : public ExpressionContext {
+   public:
+    explicit EffectContext(FullCodeGenerator* codegen)
+        : ExpressionContext(codegen) { }
+
+    virtual void Plug(bool flag) const;
+    virtual void Plug(Register reg) const;
+    virtual void Plug(Label* materialize_true, Label* materialize_false) const;
+    virtual void Plug(Slot* slot) const;
+    virtual void Plug(Handle<Object> lit) const;
+    virtual void Plug(Heap::RootListIndex) const;
+    virtual void PlugTOS() const;
+    virtual void DropAndPlug(int count, Register reg) const;
+    virtual void EmitLogicalLeft(BinaryOperation* expr,
+                                 Label* eval_right,
+                                 Label* done) const;
+    virtual void PrepareTest(Label* materialize_true,
+                             Label* materialize_false,
+                             Label** if_true,
+                             Label** if_false,
+                             Label** fall_through) const;
+    virtual bool IsEffect() const { return true; }
+  };
+
+  const ExpressionContext* context_;
 
   friend class NestedStatement;
 
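For orientation, a minimal sketch (not part of the patch) of how these RAII contexts are meant to be used: a visitor installs the appropriate context for the duration of a subexpression visit, and the destructor restores the enclosing one. The helper name below is an assumption for illustration.

    // Hypothetical helper: evaluate expr, leaving its value in the accumulator.
    void FullCodeGenerator::VisitForAccumulatorValue(Expression* expr) {
      AccumulatorValueContext context(this);  // constructor calls set_new_context
      Visit(expr);  // Plug() implementations route the result via context()
    }  // destructor restores the previous context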
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f089b85..a909caf 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -372,13 +372,14 @@
 
 int post_gc_processing_count = 0;
 
-void GlobalHandles::PostGarbageCollectionProcessing() {
+bool GlobalHandles::PostGarbageCollectionProcessing() {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
   // At the same time deallocate all DESTROYED nodes.
   ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count;
+  bool weak_callback_invoked = false;
   Node** p = &head_;
   while (*p != NULL) {
     if ((*p)->PostGarbageCollectionProcessing()) {
@@ -389,6 +390,7 @@
         // restart the processing).
         break;
       }
+      weak_callback_invoked = true;
     }
     if ((*p)->state_ == Node::DESTROYED) {
       // Delete the link.
@@ -407,6 +409,7 @@
   if (first_deallocated()) {
     first_deallocated()->set_next(head());
   }
+  return weak_callback_invoked;
 }
 
 
diff --git a/src/global-handles.h b/src/global-handles.h
index 659f86e..c4c59fd 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -95,8 +95,9 @@
   // Tells whether global handle is weak.
   static bool IsWeak(Object** location);
 
-  // Process pending weak handles.
-  static void PostGarbageCollectionProcessing();
+  // Process pending weak handles.  Returns true if any weak handle
+  // callback has been invoked.
+  static bool PostGarbageCollectionProcessing();
 
   // Iterates over all strong handles.
   static void IterateStrongRoots(ObjectVisitor* v);
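The new return value supports a bounded retry pattern in callers; a minimal sketch, mirroring the loop added to Heap::PerformGarbageCollection later in this patch:

    // Weak callbacks may release objects that only the next major GC can
    // reclaim, so rerun the collector while callbacks keep firing, with a
    // cap because callbacks can run arbitrary code indefinitely.
    for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
      if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
      MarkCompact(tracer);
    }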
diff --git a/src/globals.h b/src/globals.h
index f168d6e..fbc749d 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -214,6 +214,12 @@
 const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
 const intptr_t kMapAlignmentMask = kMapAlignment - 1;
 
+// Desired alignment for generated code is 32 bytes (to improve cache line
+// utilization).
+const int kCodeAlignmentBits = 5;
+const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+
 // Tag information for Failure.
 const int kFailureTag = 3;
 const int kFailureTagSize = 2;
@@ -588,6 +594,10 @@
 #define MAP_POINTER_ALIGN(value)                                \
   (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
 
+// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
+#define CODE_POINTER_ALIGN(value)                               \
+  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+
 // The expression OFFSET_OF(type, field) computes the byte-offset
 // of the specified field relative to the containing type. This
 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
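A worked example of the new alignment macro, with kCodeAlignment == 32 as defined above:

    // (value + 31) & ~31 rounds up to the next 32-byte boundary:
    CODE_POINTER_ALIGN(1);   // == 32
    CODE_POINTER_ALIGN(32);  // == 32 (already aligned)
    CODE_POINTER_ALIGN(33);  // == 64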
diff --git a/src/handles.cc b/src/handles.cc
index 0146401..78a7fcf 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -31,7 +31,6 @@
 #include "api.h"
 #include "arguments.h"
 #include "bootstrapper.h"
-#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "execution.h"
@@ -143,6 +142,13 @@
 
 
 void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
+  // If objects constructed from this function exist then changing
+  // 'estimated_nof_properties' is dangerous since the previous value might
+  // have been compiled into the fast construct stub. Moreover, the inobject
+  // slack tracking logic might have adjusted the previous value, so even
+  // passing the same value is risky.
+  if (func->shared()->live_objects_may_exist()) return;
+
   func->shared()->set_expected_nof_properties(nof);
   if (func->has_initial_map()) {
     Handle<Map> new_initial_map =
@@ -159,16 +165,25 @@
 
 
 static int ExpectedNofPropertiesFromEstimate(int estimate) {
-  // TODO(1231235): We need dynamic feedback to estimate the number
-  // of expected properties in an object. The static hack below
-  // is barely a solution.
-  if (estimate == 0) return 4;
-  return estimate + 2;
+  // If no properties are added in the constructor, they are more likely
+  // to be added later.
+  if (estimate == 0) estimate = 2;
+
+  // We do not shrink objects that go into a snapshot (yet), so we adjust
+  // the estimate conservatively.
+  if (Serializer::enabled()) return estimate + 2;
+
+  // Inobject slack tracking will reclaim redundant inobject space later,
+  // so we can afford to adjust the estimate generously.
+  return estimate + 6;
 }
 
 
 void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
                                           int estimate) {
+  // See the comment in SetExpectedNofProperties.
+  if (shared->live_objects_may_exist()) return;
+
   shared->set_expected_nof_properties(
       ExpectedNofPropertiesFromEstimate(estimate));
 }
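Worked examples of the revised estimate, following the arithmetic above:

    // Serializer disabled: estimate 0 -> bumped to 2 -> 2 + 6 == 8 expected
    //                      properties; estimate 3 -> 3 + 6 == 9.
    // Serializer enabled:  estimate 3 -> 3 + 2 == 5 (conservative, since
    //                      snapshot objects are not shrunk yet).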
@@ -467,7 +482,8 @@
 
   if (!script->source()->IsString()) {
     ASSERT(script->source()->IsUndefined());
-    script->set_line_ends(*(Factory::NewFixedArray(0)));
+    Handle<FixedArray> empty = Factory::NewFixedArray(0);
+    script->set_line_ends(*empty);
     ASSERT(script->line_ends()->IsFixedArray());
     return;
   }
diff --git a/src/hashmap.h b/src/hashmap.h
index b92c715..3b947be 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -83,12 +83,12 @@
   void Clear();
 
   // The number of (non-empty) entries in the table.
-  uint32_t occupancy() const  { return occupancy_; }
+  uint32_t occupancy() const { return occupancy_; }
 
   // The capacity of the table. The implementation
   // makes sure that occupancy is at most 80% of
   // the table capacity.
-  uint32_t capacity() const  { return capacity_; }
+  uint32_t capacity() const { return capacity_; }
 
   // Iteration
   //
@@ -108,7 +108,7 @@
   uint32_t capacity_;
   uint32_t occupancy_;
 
-  Entry* map_end() const  { return map_ + capacity_; }
+  Entry* map_end() const { return map_ + capacity_; }
   Entry* Probe(void* key, uint32_t hash);
   void Initialize(uint32_t capacity);
   void Resize();
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 0d1ad5a..b68f5c1 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -35,6 +35,16 @@
 namespace v8 {
 namespace internal {
 
+void Heap::UpdateOldSpaceLimits() {
+  int old_gen_size = PromotedSpaceSize();
+  old_gen_promotion_limit_ =
+      old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+  old_gen_allocation_limit_ =
+      old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+  old_gen_exhausted_ = false;
+}
+
+
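A worked example of the limit computation, using kMinimumPromotionLimit == 2 MB and kMinimumAllocationLimit == 8 MB (moved into heap.h later in this patch):

    // old_gen_size == 30 MB: promotion limit = 30 + max(2, 10) == 40 MB,
    //                        allocation limit = 30 + max(8, 15) == 45 MB.
    // old_gen_size ==  3 MB: promotion limit =  3 + max(2, 1)  ==  5 MB,
    //                        allocation limit =  3 + max(8, 1)  == 11 MB.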
 int Heap::MaxObjectSizeInPagedSpace() {
   return Page::kMaxHeapObjectSize;
 }
@@ -49,6 +59,11 @@
 }
 
 
+Object* Heap::CopyFixedArray(FixedArray* src) {
+  return CopyFixedArrayWithMap(src, src->map());
+}
+
+
 Object* Heap::AllocateRaw(int size_in_bytes,
                           AllocationSpace space,
                           AllocationSpace retry_space) {
@@ -403,7 +418,7 @@
     }                                                                     \
     if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
     Counters::gc_last_resort_from_handles.Increment();                    \
-    Heap::CollectAllGarbage(false);                                       \
+    Heap::CollectAllAvailableGarbage();                                   \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __object__ = FUNCTION_CALL;                                         \
diff --git a/src/heap.cc b/src/heap.cc
index 443c926..047e331 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -55,7 +55,6 @@
 String* Heap::hidden_symbol_;
 Object* Heap::roots_[Heap::kRootListLength];
 
-
 NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;
 OldSpace* Heap::old_data_space_ = NULL;
@@ -64,9 +63,6 @@
 CellSpace* Heap::cell_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
-static const int kMinimumPromotionLimit = 2*MB;
-static const int kMinimumAllocationLimit = 8*MB;
-
 int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
 int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
 
@@ -405,17 +401,26 @@
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(bool force_compaction,
+                             CollectionPolicy collectionPolicy) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   MarkCompactCollector::SetForceCompaction(force_compaction);
-  CollectGarbage(0, OLD_POINTER_SPACE);
+  CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
   MarkCompactCollector::SetForceCompaction(false);
 }
 
 
-bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
+void Heap::CollectAllAvailableGarbage() {
+  CompilationCache::Clear();
+  CollectAllGarbage(true, AGGRESSIVE);
+}
+
+
+bool Heap::CollectGarbage(int requested_size,
+                          AllocationSpace space,
+                          CollectionPolicy collectionPolicy) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
 
@@ -442,7 +447,7 @@
         ? &Counters::gc_scavenger
         : &Counters::gc_compactor;
     rate->Start();
-    PerformGarbageCollection(space, collector, &tracer);
+    PerformGarbageCollection(collector, &tracer, collectionPolicy);
     rate->Stop();
 
     GarbageCollectionEpilogue();
@@ -475,7 +480,7 @@
 
 void Heap::PerformScavenge() {
   GCTracer tracer;
-  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
+  PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
 }
 
 
@@ -664,9 +669,9 @@
   survival_rate_ = survival_rate;
 }
 
-void Heap::PerformGarbageCollection(AllocationSpace space,
-                                    GarbageCollector collector,
-                                    GCTracer* tracer) {
+void Heap::PerformGarbageCollection(GarbageCollector collector,
+                                    GCTracer* tracer,
+                                    CollectionPolicy collectionPolicy) {
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
@@ -696,25 +701,45 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    int old_gen_size = PromotedSpaceSize();
-    old_gen_promotion_limit_ =
-        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-    old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    UpdateOldSpaceLimits();
 
-    if (high_survival_rate_during_scavenges &&
-        IsStableOrIncreasingSurvivalTrend()) {
-      // Stable high survival rates of young objects both during partial and
-      // full collection indicate that mutator is either building or modifying
-      // a structure with a long lifetime.
-      // In this case we aggressively raise old generation memory limits to
-      // postpone subsequent mark-sweep collection and thus trade memory
-      // space for the mutation speed.
-      old_gen_promotion_limit_ *= 2;
-      old_gen_allocation_limit_ *= 2;
+    // A major GC invokes weak handle callbacks on weakly reachable handles,
+    // but does not collect the weakly reachable objects themselves until the
+    // next major GC.  Therefore, when collecting aggressively, if any weak
+    // handle callback has been invoked we rerun the major GC to release the
+    // objects that have become garbage.
+    if (collectionPolicy == AGGRESSIVE) {
+      // Note: as weak callbacks can execute arbitrary code, we cannot
+      // hope that eventually there will be no weak callback invocations.
+      // Therefore stop recollecting after several attempts.
+      const int kMaxNumberOfAttempts = 7;
+      for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
+        { DisableAssertNoAllocation allow_allocation;
+          GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+          if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
+        }
+        MarkCompact(tracer);
+        // Weak handle callbacks can allocate data, so keep limits correct.
+        UpdateOldSpaceLimits();
+      }
+    } else {
+      if (high_survival_rate_during_scavenges &&
+          IsStableOrIncreasingSurvivalTrend()) {
+        // Stable high survival rates of young objects both during partial and
+        // full collection indicate that mutator is either building or modifying
+        // a structure with a long lifetime.
+        // In this case we aggressively raise old generation memory limits to
+        // postpone subsequent mark-sweep collection and thus trade memory
+        // space for the mutation speed.
+        old_gen_promotion_limit_ *= 2;
+        old_gen_allocation_limit_ *= 2;
+      }
     }
 
-    old_gen_exhausted_ = false;
+    { DisableAssertNoAllocation allow_allocation;
+      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+      GlobalHandles::PostGarbageCollectionProcessing();
+    }
   } else {
     tracer_ = tracer;
     Scavenge();
@@ -725,12 +750,6 @@
 
   Counters::objs_since_last_young.Set(0);
 
-  if (collector == MARK_COMPACTOR) {
-    DisableAssertNoAllocation allow_allocation;
-    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    GlobalHandles::PostGarbageCollectionProcessing();
-  }
-
   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing();
 
@@ -1199,7 +1218,14 @@
     RecordCopiedObject(target);
 #endif
     HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
-
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+    if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+      if (target->IsJSFunction()) {
+        PROFILE(FunctionMoveEvent(source->address(), target->address()));
+        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+      }
+    }
+#endif
     return target;
   }
 
@@ -1834,6 +1860,13 @@
 
   CreateFixedStubs();
 
+  // Allocate the dictionary of intrinsic function names.
+  obj = StringDictionary::Allocate(Runtime::kNumFunctions);
+  if (obj->IsFailure()) return false;
+  obj = Runtime::InitializeIntrinsicFunctionNames(obj);
+  if (obj->IsFailure()) return false;
+  set_intrinsic_function_names(StringDictionary::cast(obj));
+
   if (InitializeNumberStringCache()->IsFailure()) return false;
 
   // Allocate cache for single character ASCII strings.
@@ -2042,6 +2075,7 @@
   share->set_debug_info(undefined_value());
   share->set_inferred_name(empty_string());
   share->set_compiler_hints(0);
+  share->set_initial_map(undefined_value());
   share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
   share->set_num_literals(0);
@@ -2410,7 +2444,7 @@
   // Compute size
   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
   int obj_size = Code::SizeFor(body_size);
-  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
+  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
   Object* result;
   if (obj_size > MaxObjectSizeInPagedSpace()) {
     result = lo_space_->AllocateRawCode(obj_size);
@@ -2624,6 +2658,20 @@
 }
 
 
+static bool HasDuplicates(DescriptorArray* descriptors) {
+  int count = descriptors->number_of_descriptors();
+  if (count > 1) {
+    String* prev_key = descriptors->GetKey(0);
+    for (int i = 1; i != count; i++) {
+      String* current_key = descriptors->GetKey(i);
+      if (prev_key == current_key) return true;
+      prev_key = current_key;
+    }
+  }
+  return false;
+}
+
+
 Object* Heap::AllocateInitialMap(JSFunction* fun) {
   ASSERT(!fun->has_initial_map());
 
@@ -2657,24 +2705,38 @@
   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
-      count = in_object_properties;
+      // Inline constructor can only handle inobject properties.
+      fun->shared()->ForbidInlineConstructor();
+    } else {
+      Object* descriptors_obj = DescriptorArray::Allocate(count);
+      if (descriptors_obj->IsFailure()) return descriptors_obj;
+      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+      for (int i = 0; i < count; i++) {
+        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
+        ASSERT(name->IsSymbol());
+        FieldDescriptor field(name, i, NONE);
+        field.SetEnumerationIndex(i);
+        descriptors->Set(i, &field);
+      }
+      descriptors->SetNextEnumerationIndex(count);
+      descriptors->SortUnchecked();
+
+      // The descriptors may contain duplicates because the compiler does not
+      // guarantee the uniqueness of property names (it would have required
+      // quadratic time). Once the descriptors are sorted we can check for
+      // duplicates in linear time.
+      if (HasDuplicates(descriptors)) {
+        fun->shared()->ForbidInlineConstructor();
+      } else {
+        map->set_instance_descriptors(descriptors);
+        map->set_pre_allocated_property_fields(count);
+        map->set_unused_property_fields(in_object_properties - count);
+      }
     }
-    Object* descriptors_obj = DescriptorArray::Allocate(count);
-    if (descriptors_obj->IsFailure()) return descriptors_obj;
-    DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
-    for (int i = 0; i < count; i++) {
-      String* name = fun->shared()->GetThisPropertyAssignmentName(i);
-      ASSERT(name->IsSymbol());
-      FieldDescriptor field(name, i, NONE);
-      field.SetEnumerationIndex(i);
-      descriptors->Set(i, &field);
-    }
-    descriptors->SetNextEnumerationIndex(count);
-    descriptors->Sort();
-    map->set_instance_descriptors(descriptors);
-    map->set_pre_allocated_property_fields(count);
-    map->set_unused_property_fields(in_object_properties - count);
   }
+
+  fun->shared()->StartInobjectSlackTracking(map);
+
   return map;
 }
 
@@ -2691,7 +2753,20 @@
   // fixed array (e.g., Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See,
   // for example, JSArray::JSArrayVerify.
-  obj->InitializeBody(map->instance_size());
+  Object* filler;
+  // We cannot always fill with one_pointer_filler_map because objects
+  // created from API functions expect their internal fields to be initialized
+  // with undefined_value.
+  if (map->constructor()->IsJSFunction() &&
+      JSFunction::cast(map->constructor())->shared()->
+          IsInobjectSlackTrackingInProgress()) {
+    // We might want to shrink the object later.
+    ASSERT(obj->GetInternalFieldCount() == 0);
+    filler = Heap::one_pointer_filler_map();
+  } else {
+    filler = Heap::undefined_value();
+  }
+  obj->InitializeBody(map->instance_size(), filler);
 }
 
 
@@ -2874,19 +2949,13 @@
 
 Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                         JSGlobalProxy* object) {
-  // Allocate initial map if absent.
-  if (!constructor->has_initial_map()) {
-    Object* initial_map = AllocateInitialMap(constructor);
-    if (initial_map->IsFailure()) return initial_map;
-    constructor->set_initial_map(Map::cast(initial_map));
-    Map::cast(initial_map)->set_constructor(constructor);
-  }
-
+  ASSERT(constructor->has_initial_map());
   Map* map = constructor->initial_map();
 
-  // Check that the already allocated object has the same size as
+  // Check that the already allocated object has the same size and type as
   // objects allocated using the constructor.
   ASSERT(map->instance_size() == object->map()->instance_size());
+  ASSERT(map->instance_type() == object->map()->instance_type());
 
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
@@ -3133,6 +3202,7 @@
   if (length < 0 || length > FixedArray::kMaxLength) {
     return Failure::OutOfMemoryException();
   }
+  ASSERT(length > 0);
   // Use the general function if we're forced to always allocate.
   if (always_allocate()) return AllocateFixedArray(length, TENURED);
   // Allocate the raw data for a fixed array.
@@ -3143,16 +3213,19 @@
 }
 
 
-Object* Heap::CopyFixedArray(FixedArray* src) {
+Object* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   int len = src->length();
   Object* obj = AllocateRawFixedArray(len);
   if (obj->IsFailure()) return obj;
   if (Heap::InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
+    dst->set_map(map);
+    CopyBlock(dst->address() + kPointerSize,
+              src->address() + kPointerSize,
+              FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map(src->map());
+  HeapObject::cast(obj)->set_map(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
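A note on the new-space fast path above: the first word of a FixedArray is its map pointer, so once dst->set_map(map) has written the new map, only the remaining bytes need to be block-copied:

    // [ map | length | elem0 ... elemN-1 ]
    //   ^ written by set_map(map)
    //   copy starts at address() + kPointerSize and covers
    //   FixedArray::SizeFor(len) - kPointerSize bytes (length + elements)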
diff --git a/src/heap.h b/src/heap.h
index 484cd22..8a11530 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -114,6 +114,7 @@
   V(Object, last_script_id, LastScriptId)                                      \
   V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
+  V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
 
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #define STRONG_ROOT_LIST(V)                                                    \
@@ -497,7 +498,12 @@
 
   // Make a copy of src and return it. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);
+  MUST_USE_RESULT static inline Object* CopyFixedArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT static Object* CopyFixedArrayWithMap(FixedArray* src,
+                                                       Map* map);
 
   // Allocates a fixed array initialized with the hole values.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -686,13 +692,21 @@
   static void GarbageCollectionPrologue();
   static void GarbageCollectionEpilogue();
 
+  enum CollectionPolicy { NORMAL, AGGRESSIVE };
+
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
-  static bool CollectGarbage(int required_space, AllocationSpace space);
+  static bool CollectGarbage(int required_space,
+                             AllocationSpace space,
+                             CollectionPolicy collectionPolicy = NORMAL);
 
   // Performs a full garbage collection. Force compaction if the
   // parameter is true.
-  static void CollectAllGarbage(bool force_compaction);
+  static void CollectAllGarbage(bool force_compaction,
+                                CollectionPolicy collectionPolicy = NORMAL);
+
+  // Last-resort GC; tries to squeeze out as much garbage as possible.
+  static void CollectAllAvailableGarbage();
 
   // Notify the heap that a context has been disposed.
   static int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1213,9 +1227,14 @@
   static GarbageCollector SelectGarbageCollector(AllocationSpace space);
 
   // Performs garbage collection
-  static void PerformGarbageCollection(AllocationSpace space,
-                                       GarbageCollector collector,
-                                       GCTracer* tracer);
+  static void PerformGarbageCollection(GarbageCollector collector,
+                                       GCTracer* tracer,
+                                       CollectionPolicy collectionPolicy);
+
+  static const int kMinimumPromotionLimit = 2 * MB;
+  static const int kMinimumAllocationLimit = 8 * MB;
+
+  inline static void UpdateOldSpaceLimits();
 
   // Allocate an uninitialized object in map space.  The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index eef307d..e2f4547 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -993,6 +993,14 @@
 }
 
 
+void Assembler::dec_b(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFE);
+  emit_operand(ecx, dst);
+}
+
+
 void Assembler::dec(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1511,32 +1519,6 @@
 }
 
 
-void Assembler::link_to(Label* L, Label* appendix) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
-  if (appendix->is_linked()) {
-    if (L->is_linked()) {
-      // Append appendix to L's list.
-      Label p;
-      Label q = *L;
-      do {
-        p = q;
-        Displacement disp = disp_at(&q);
-        disp.next(&q);
-      } while (q.is_linked());
-      Displacement disp = disp_at(&p);
-      disp.link_to(appendix);
-      disp_at_put(&p, disp);
-      p.Unuse();  // to avoid assertion failure in ~Label
-    } else {
-      // L is empty, simply use appendix.
-      *L = *appendix;
-    }
-  }
-  appendix->Unuse();  // appendix should not be used anymore
-}
-
-
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = NULL;
@@ -1545,6 +1527,19 @@
 }
 
 
+void Assembler::bind(NearLabel* L) {
+  ASSERT(!L->is_bound());
+  last_pc_ = NULL;
+  while (L->unresolved_branches_ > 0) {
+    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
+    int disp = pc_offset() - branch_pos;
+    ASSERT(is_int8(disp));
+    set_byte_at(branch_pos - sizeof(int8_t), disp);
+    L->unresolved_branches_--;
+  }
+  L->bind_to(pc_offset());
+}
+
 void Assembler::call(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1641,6 +1636,24 @@
 }
 
 
+void Assembler::jmp(NearLabel* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 1110 1011 #8-bit disp.
+    EMIT(0xEB);
+    EMIT((offs - short_size) & 0xFF);
+  } else {
+    EMIT(0xEB);
+    EMIT(0x00);      // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
+
 
 void Assembler::j(Condition cc, Label* L, Hint hint) {
   EnsureSpace ensure_space(this);
@@ -1696,6 +1709,27 @@
 }
 
 
+void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 0111 tttn #8-bit disp
+    EMIT(0x70 | cc);
+    EMIT((offs - short_size) & 0xFF);
+  } else {
+    EMIT(0x70 | cc);
+    EMIT(0x00);      // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
+
+
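A worked example of the 8-bit displacement arithmetic these short branches share (the offsets are hypothetical): a jmp to a NearLabel bound at pc_offset() == 10, emitted when pc_offset() == 40, computes:

    int offs = 10 - 40;       // -30
    EMIT(0xEB);               // short-jmp opcode
    EMIT((offs - 2) & 0xFF);  // -32 == 0xE0; the CPU adds the displacement to
                              // the address *after* the two-byte instruction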
 // FPU instructions.
 
 void Assembler::fld(int i) {
@@ -2179,6 +2213,16 @@
 }
 
 
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2201,7 +2245,29 @@
 }
 
 
-void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0xC2);
+  emit_sse_operand(dst, src);
+  EMIT(1);  // LT == 1
+}
+
+
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x28);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2358,6 +2424,19 @@
   emit_sse_operand(dst, src);
 }
 
+
+void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x73);
+  emit_sse_operand(esi, reg);  // esi == 6
+  EMIT(imm8);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 928f172..1dab0a6 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -64,15 +64,15 @@
 // and best performance in optimized code.
 //
 struct Register {
-  bool is_valid() const  { return 0 <= code_ && code_ < 8; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
+  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  bool is(Register reg) const { return code_ == reg.code_; }
   // eax, ebx, ecx and edx are byte registers, the rest are not.
-  bool is_byte_register() const  { return code_ <= 3; }
-  int code() const  {
+  bool is_byte_register() const { return code_ <= 3; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
@@ -93,8 +93,8 @@
 
 
 struct XMMRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 8; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
@@ -595,6 +595,7 @@
   void cmp(const Operand& op, Handle<Object> handle);
 
   void dec_b(Register dst);
+  void dec_b(const Operand& dst);
 
   void dec(Register dst);
   void dec(const Operand& dst);
@@ -687,6 +688,7 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(NearLabel* L);
 
   // Calls
   void call(Label* L);
@@ -701,11 +703,17 @@
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
+  // Short jump
+  void jmp(NearLabel* L);
+
   // Conditional jumps
   void j(Condition cc, Label* L, Hint hint = no_hint);
   void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
   void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
 
+  // Conditional short jump
+  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+
   // Floating-point operations
   void fld(int i);
   void fstp(int i);
@@ -788,9 +796,15 @@
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
+  void andpd(XMMRegister dst, XMMRegister src);
+
   void ucomisd(XMMRegister dst, XMMRegister src);
   void movmskpd(Register dst, XMMRegister src);
 
+  void cmpltsd(XMMRegister dst, XMMRegister src);
+
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqu(XMMRegister dst, const Operand& src);
@@ -806,6 +820,8 @@
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
+  void psllq(XMMRegister reg, int8_t imm8);
+
   // Parallel XMM operations.
   void movntdqa(XMMRegister src, const Operand& dst);
   void movntdq(const Operand& dst, XMMRegister src);
@@ -839,9 +855,9 @@
   // Used for inline tables, e.g., jump-tables.
   void dd(uint32_t data, RelocInfo::Mode reloc_info);
 
-  int pc_offset() const  { return pc_ - buffer_; }
+  int pc_offset() const { return pc_ - buffer_; }
   int current_statement_position() const { return current_statement_position_; }
-  int current_position() const  { return current_position_; }
+  int current_position() const { return current_position_; }
 
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
@@ -868,6 +884,7 @@
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
   byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
@@ -902,7 +919,6 @@
   // labels
   void print(Label* L);
   void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
 
   // displacements
   inline Displacement disp_at(Label* L);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a095ef7..0ad3e6d 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -105,7 +105,11 @@
 
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function) {
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
 
@@ -148,6 +152,26 @@
     __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
     __ j(equal, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
+      __ j(not_zero, &allocate);
+
+      __ push(eax);
+      __ push(edi);
+
+      __ push(edi);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(edi);
+      __ pop(eax);
+
+      __ bind(&allocate);
+    }
+
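The countdown above, in C++-flavored pseudocode (the counter and runtime function follow the diff; the accessor spelling is an assumption):

    // Each construction from the generous initial map decrements the shared
    // counter; when it reaches zero, finalize the instance size once.
    if (--shared->construction_count == 0) {
      Runtime_FinalizeInstanceSize(constructor);  // Runtime::kFinalizeInstanceSize
    }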
     // Now allocate the JSObject on the heap.
     // edi: constructor
     // eax: initial map
@@ -167,7 +191,12 @@
     // ebx: JSObject
     // edi: start of next object
     { Label loop, entry;
-      __ mov(edx, Factory::undefined_value());
+      // Use the one-pointer filler map so the object can be truncated later.
+      if (count_constructions) {
+        __ mov(edx, Factory::one_pointer_filler_map());
+      } else {
+        __ mov(edx, Factory::undefined_value());
+      }
       __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
       __ jmp(&entry);
       __ bind(&loop);
@@ -351,13 +380,18 @@
 }
 
 
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false);
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true);
+  Generate_JSConstructStubHelper(masm, true, false);
 }
 
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 366b91e..3e2b7ae 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -208,7 +208,7 @@
 
 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  Label false_result, true_result, not_string;
+  NearLabel false_result, true_result, not_string;
   __ mov(eax, Operand(esp, 1 * kPointerSize));
 
   // 'null' => false.
@@ -966,7 +966,7 @@
           __ bind(&non_smi_result);
           // Allocate a heap number if needed.
           __ mov(ebx, Operand(eax));  // ebx: result
-          Label skip_allocation;
+          NearLabel skip_allocation;
           switch (mode_) {
             case OVERWRITE_LEFT:
             case OVERWRITE_RIGHT:
@@ -1036,7 +1036,7 @@
       }
 
       // Test if left operand is a string.
-      Label lhs_not_string;
+      NearLabel lhs_not_string;
       __ test(lhs, Immediate(kSmiTagMask));
       __ j(zero, &lhs_not_string);
       __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
@@ -1045,7 +1045,7 @@
       StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
       __ TailCallStub(&string_add_left_stub);
 
-      Label call_runtime_with_args;
+      NearLabel call_runtime_with_args;
       // Left operand is not a string, test right.
       __ bind(&lhs_not_string);
       __ test(rhs, Immediate(kSmiTagMask));
@@ -1221,8 +1221,8 @@
   // Test that eax is a number.
   Label runtime_call;
   Label runtime_call_clear_stack;
-  Label input_not_smi;
-  Label loaded;
+  NearLabel input_not_smi;
+  NearLabel loaded;
   __ mov(eax, Operand(esp, kPointerSize));
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &input_not_smi);
@@ -1295,7 +1295,7 @@
   __ lea(ecx, Operand(ecx, ecx, times_2, 0));
   __ lea(ecx, Operand(eax, ecx, times_4, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  Label cache_miss;
+  NearLabel cache_miss;
   __ cmp(ebx, Operand(ecx, 0));
   __ j(not_equal, &cache_miss);
   __ cmp(edx, Operand(ecx, kIntSize));
@@ -1338,7 +1338,7 @@
 
 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Only free register is edi.
-  Label done;
+  NearLabel done;
   ASSERT(type_ == TranscendentalCache::SIN ||
          type_ == TranscendentalCache::COS);
   // More transcendental types can be added later.
@@ -1346,7 +1346,7 @@
   // Both fsin and fcos require arguments in the range +/-2^63 and
   // return NaN for infinities and NaN. They can share all code except
   // the actual fsin/fcos operation.
-  Label in_range;
+  NearLabel in_range;
   // If argument is outside the range -2^63..2^63, fsin/cos doesn't
   // work. We must reduce it to the appropriate range.
   __ mov(edi, edx);
@@ -1357,7 +1357,7 @@
   __ j(below, &in_range, taken);
   // Check for infinity and NaN. Both return NaN for sin.
   __ cmp(Operand(edi), Immediate(0x7ff00000));
-  Label non_nan_result;
+  NearLabel non_nan_result;
   __ j(not_equal, &non_nan_result, taken);
   // Input is +/-Infinity or NaN. Result is NaN.
   __ fstp(0);
@@ -1377,7 +1377,7 @@
   __ fld(1);
   // FPU Stack: input, 2*pi, input.
   {
-    Label no_exceptions;
+    NearLabel no_exceptions;
     __ fwait();
     __ fnstsw_ax();
     // Clear if Illegal Operand or Zero Division exceptions are set.
@@ -1389,7 +1389,7 @@
 
   // Compute st(0) % st(1)
   {
-    Label partial_remainder_loop;
+    NearLabel partial_remainder_loop;
     __ bind(&partial_remainder_loop);
     __ fprem1();
     __ fwait();
@@ -1552,7 +1552,7 @@
     __ shr_cl(scratch2);
     // Now the unsigned answer is in scratch2.  We need to move it to ecx and
     // we may need to fix the sign.
-    Label negative;
+    NearLabel negative;
     __ xor_(ecx, Operand(ecx));
     __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
     __ j(greater, &negative);
@@ -1702,7 +1702,7 @@
 
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
-  Label load_smi, done;
+  NearLabel load_smi, done;
 
   __ test(number, Immediate(kSmiTagMask));
   __ j(zero, &load_smi, not_taken);
@@ -1720,7 +1720,7 @@
 
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
-  Label load_smi_edx, load_eax, load_smi_eax, done;
+  NearLabel load_smi_edx, load_eax, load_smi_eax, done;
   // Load operand in edx into xmm0.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
@@ -1750,7 +1750,7 @@
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
-  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
   // Load operand in edx into xmm0, or branch to not_numbers.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
@@ -1798,7 +1798,7 @@
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
-  Label load_smi_1, load_smi_2, done_load_1, done;
+  NearLabel load_smi_1, load_smi_2, done_load_1, done;
   if (arg_location == ARGS_IN_REGISTERS) {
     __ mov(scratch, edx);
   } else {
@@ -1857,7 +1857,7 @@
 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                              Label* non_float,
                                              Register scratch) {
-  Label test_other, done;
+  NearLabel test_other, done;
   // Test if both operands are floats or smi -> scratch=k_is_float;
   // Otherwise scratch = k_not_float.
   __ test(edx, Immediate(kSmiTagMask));
@@ -1879,36 +1879,36 @@
 
 
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done;
+  Label slow, done, undo;
 
   if (op_ == Token::SUB) {
-    // Check whether the value is a smi.
-    Label try_float;
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(not_zero, &try_float, not_taken);
+    if (include_smi_code_) {
+      // Check whether the value is a smi.
+      NearLabel try_float;
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &try_float, not_taken);
 
-    if (negative_zero_ == kStrictNegativeZero) {
-      // Go slow case if the value of the expression is zero
-      // to make sure that we switch between 0 and -0.
-      __ test(eax, Operand(eax));
-      __ j(zero, &slow, not_taken);
+      if (negative_zero_ == kStrictNegativeZero) {
+        // Go slow case if the value of the expression is zero
+        // to make sure that we switch between 0 and -0.
+        __ test(eax, Operand(eax));
+        __ j(zero, &slow, not_taken);
+      }
+
+      // The value of the expression is a smi that is not zero.  Try
+      // optimistic subtraction '0 - value'.
+      __ mov(edx, Operand(eax));
+      __ Set(eax, Immediate(0));
+      __ sub(eax, Operand(edx));
+      __ j(overflow, &undo, not_taken);
+      __ StubReturn(1);
+
+      // Try floating point case.
+      __ bind(&try_float);
+    } else if (FLAG_debug_code) {
+      __ AbortIfSmi(eax);
     }
 
-    // The value of the expression is a smi that is not zero.  Try
-    // optimistic subtraction '0 - value'.
-    Label undo;
-    __ mov(edx, Operand(eax));
-    __ Set(eax, Immediate(0));
-    __ sub(eax, Operand(edx));
-    __ j(no_overflow, &done, taken);
-
-    // Restore eax and go slow case.
-    __ bind(&undo);
-    __ mov(eax, Operand(edx));
-    __ jmp(&slow);
-
-    // Try floating point case.
-    __ bind(&try_float);
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
     __ cmp(edx, Factory::heap_number_map());
     __ j(not_equal, &slow);
@@ -1928,6 +1928,18 @@
       __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
     }
   } else if (op_ == Token::BIT_NOT) {
+    if (include_smi_code_) {
+      Label non_smi;
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &non_smi);
+      __ not_(eax);
+      __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
+      __ ret(0);
+      __ bind(&non_smi);
+    } else if (FLAG_debug_code) {
+      __ AbortIfSmi(eax);
+    }
+
     // Check if the operand is a heap number.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
     __ cmp(edx, Factory::heap_number_map());
@@ -1941,7 +1953,7 @@
                    &slow);
 
     // Do the bitwise operation and check if the result fits in a smi.
-    Label try_float;
+    NearLabel try_float;
     __ not_(ecx);
     __ cmp(ecx, 0xc0000000);
     __ j(sign, &try_float, not_taken);
@@ -1978,6 +1990,10 @@
   __ bind(&done);
   __ StubReturn(1);
 
+  // Restore eax and go slow case.
+  __ bind(&undo);
+  __ mov(eax, Operand(edx));
+
   // Handle the slow case by jumping to the JavaScript builtin.
   __ bind(&slow);
   __ pop(ecx);  // pop return address.
@@ -2010,7 +2026,7 @@
   __ j(not_zero, &slow, not_taken);
 
   // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
+  NearLabel adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -2087,7 +2103,7 @@
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
-  Label add_arguments_object;
+  NearLabel add_arguments_object;
   __ bind(&try_allocate);
   __ test(ecx, Operand(ecx));
   __ j(zero, &add_arguments_object);
@@ -2139,7 +2155,7 @@
   __ SmiUntag(ecx);
 
   // Copy the fixed array slots.
-  Label loop;
+  NearLabel loop;
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@@ -2367,7 +2383,7 @@
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  Label setup_two_byte, setup_rest;
+  NearLabel setup_two_byte, setup_rest;
   __ test(edi, Operand(edi));
   __ mov(edi, FieldOperand(eax, String::kLengthOffset));
   __ j(zero, &setup_two_byte);
@@ -2461,7 +2477,7 @@
   // ebx: last_match_info backing store (FixedArray)
   // ecx: offsets vector
   // edx: number of capture registers
-  Label next_capture, done;
+  NearLabel next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
@@ -2517,13 +2533,13 @@
   // number string cache for smis is just the smi value, and the hash for
   // doubles is the xor of the upper and lower words. See
   // Heap::GetNumberStringCache.
-  Label smi_hash_calculated;
-  Label load_result_from_cache;
+  NearLabel smi_hash_calculated;
+  NearLabel load_result_from_cache;
   if (object_is_smi) {
     __ mov(scratch, object);
     __ SmiUntag(scratch);
   } else {
-    Label not_smi, hash_calculated;
+    NearLabel not_smi, hash_calculated;
     STATIC_ASSERT(kSmiTag == 0);
     __ test(object, Immediate(kSmiTagMask));
     __ j(not_zero, &not_smi);
@@ -2613,6 +2629,27 @@
 
   Label check_unequal_objects, done;
 
+  // Compare two smis if required.
+  if (include_smi_compare_) {
+    Label non_smi, smi_done;
+    __ mov(ecx, Operand(edx));
+    __ or_(ecx, Operand(eax));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &non_smi, not_taken);
+    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
+    __ j(no_overflow, &smi_done);
+    __ neg(edx);  // Correct sign in case of overflow.
+    __ bind(&smi_done);
+    __ mov(eax, edx);
+    __ ret(0);
+    __ bind(&non_smi);
+  } else if (FLAG_debug_code) {
+    __ mov(ecx, Operand(edx));
+    __ or_(ecx, Operand(eax));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ Assert(not_zero, "Unexpected smi operands.");
+  }
+
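A worked example of the smi fast path above:

    // edx == Smi(2), eax == Smi(5): edx - eax == Smi(-3), no overflow, so the
    // negative (LESS) result is returned as-is.
    // If the operands have opposite signs the subtraction may overflow and
    // wrap to the wrong sign; only the sign matters for the comparison, so
    // neg(edx) restores it.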
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
@@ -2626,7 +2663,7 @@
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      Label check_for_nan;
+      NearLabel check_for_nan;
       __ cmp(edx, Factory::undefined_value());
       __ j(not_equal, &check_for_nan);
       __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
@@ -2641,7 +2678,7 @@
       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
       __ ret(0);
     } else {
-      Label heap_number;
+      NearLabel heap_number;
       __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
              Immediate(Factory::heap_number_map()));
       __ j(equal, &heap_number);
@@ -2676,7 +2713,7 @@
         __ setcc(above_equal, eax);
         __ ret(0);
       } else {
-        Label nan;
+        NearLabel nan;
         __ j(above_equal, &nan);
         __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
         __ ret(0);
@@ -2693,7 +2730,7 @@
   // Non-strict object equality is slower, so it is handled later in the stub.
   if (cc_ == equal && strict_) {
     Label slow;  // Fallthrough label.
-    Label not_smis;
+    NearLabel not_smis;
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
@@ -2734,13 +2771,13 @@
 
     // Get the type of the first operand.
     // If the first object is a JS object, we have done pointer comparison.
-    Label first_non_object;
+    NearLabel first_non_object;
     STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
     __ j(below, &first_non_object);
 
     // Return non-zero (eax is not zero)
-    Label return_not_equal;
+    NearLabel return_not_equal;
     STATIC_ASSERT(kHeapObjectTag != 0);
     __ bind(&return_not_equal);
     __ ret(0);
@@ -2791,7 +2828,7 @@
       // Don't base result on EFLAGS when a NaN is involved.
       __ j(parity_even, &unordered, not_taken);
 
-      Label below_label, above_label;
+      NearLabel below_label, above_label;
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ j(below, &below_label, not_taken);
       __ j(above, &above_label, not_taken);
@@ -2856,8 +2893,8 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label not_both_objects;
-    Label return_unequal;
+    NearLabel not_both_objects;
+    NearLabel return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -3019,7 +3056,7 @@
   // not NULL.  The frame pointer is NULL in the exception handler of
   // a JS entry frame.
   __ xor_(esi, Operand(esi));  // Tentatively set context pointer to NULL.
-  Label skip;
+  NearLabel skip;
   __ cmp(ebp, 0);
   __ j(equal, &skip, not_taken);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3151,7 +3188,7 @@
   // Make sure we're not trying to return 'the hole' from the runtime
   // call as this may lead to crashes in the IC code later.
   if (FLAG_debug_code) {
-    Label okay;
+    NearLabel okay;
     __ cmp(eax, Factory::the_hole_value());
     __ j(not_equal, &okay);
     __ int3();
@@ -3213,7 +3250,7 @@
   __ mov(esp, Operand::StaticVariable(handler_address));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
+  NearLabel loop, done;
   __ bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
@@ -3431,7 +3468,7 @@
   // edx is function, eax is map.
 
   // Look up the function and the map in the instanceof cache.
-  Label miss;
+  NearLabel miss;
   ExternalReference roots_address = ExternalReference::roots_address();
   __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
   __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
@@ -3463,7 +3500,7 @@
   __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
 
   // Loop through the prototype chain looking for the function prototype.
-  Label loop, is_instance, is_not_instance;
+  NearLabel loop, is_instance, is_not_instance;
   __ bind(&loop);
   __ cmp(ecx, Operand(ebx));
   __ j(equal, &is_instance);
@@ -3501,7 +3538,8 @@
          | RegisterField::encode(false)   // lhs_ and rhs_ are not used
          | StrictField::encode(strict_)
          | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_);
+         | IncludeNumberCompareField::encode(include_number_compare_)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
@@ -3541,12 +3579,18 @@
     include_number_compare_name = "_NO_NUMBER";
   }
 
+  const char* include_smi_compare_name = "";
+  if (!include_smi_compare_) {
+    include_smi_compare_name = "_NO_SMI";
+  }
+
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "CompareStub_%s%s%s%s",
+               "CompareStub_%s%s%s%s%s",
                cc_name,
                strict_name,
                never_nan_nan_name,
-               include_number_compare_name);
+               include_number_compare_name,
+               include_smi_compare_name);
   return name_;
 }
 
@@ -3793,7 +3837,7 @@
   // eax: first string
   // edx: second string
   // Check if either of the strings is empty. In that case return the other.
-  Label second_not_zero_length, both_not_zero_length;
+  NearLabel second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ test(ecx, Operand(ecx));
@@ -4079,7 +4123,7 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  Label loop;
+  NearLabel loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
   // short strings.
@@ -4126,7 +4170,7 @@
   }
 
   // Don't enter the rep movs if there are less than 4 bytes to copy.
-  Label last_bytes;
+  NearLabel last_bytes;
   __ test(count, Immediate(~3));
   __ j(zero, &last_bytes);
 
@@ -4146,7 +4190,7 @@
   __ j(zero, &done);
 
   // Copy remaining characters.
-  Label loop;
+  NearLabel loop;
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
@@ -4172,7 +4216,7 @@
 
   // Make sure that both characters are not digits, as such strings have a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  Label not_array_index;
+  NearLabel not_array_index;
   __ mov(scratch, c1);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
@@ -4330,7 +4374,7 @@
   __ add(hash, Operand(scratch));
 
   // if (hash == 0) hash = 27;
-  Label hash_not_zero;
+  NearLabel hash_not_zero;
   __ test(hash, Operand(hash));
   __ j(not_zero, &hash_not_zero);
   __ mov(hash, Immediate(27));
@@ -4499,7 +4543,7 @@
   __ IncrementCounter(&Counters::string_compare_native, 1);
 
   // Find minimum length.
-  Label left_shorter;
+  NearLabel left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
   __ mov(scratch3, scratch1);
   __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
@@ -4535,7 +4579,7 @@
 
   {
     // Compare loop.
-    Label loop;
+    NearLabel loop;
     __ bind(&loop);
     // Compare characters.
     __ mov_b(scratch2, Operand(left, index, times_1, 0));
@@ -4581,7 +4625,7 @@
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
-  Label not_same;
+  NearLabel not_same;
   __ cmp(edx, Operand(eax));
   __ j(not_equal, &not_same);
   STATIC_ASSERT(EQUAL == 0);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 854052a..9c8573c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -249,7 +249,7 @@
       // the function.
       for (int i = 0; i < scope()->num_parameters(); i++) {
         Variable* par = scope()->parameter(i);
-        Slot* slot = par->slot();
+        Slot* slot = par->AsSlot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           // The use of SlotOperand below is safe in unspilled code
           // because the slot is guaranteed to be a context slot.
@@ -285,7 +285,7 @@
     // Initialize ThisFunction reference if present.
     if (scope()->is_function_scope() && scope()->function() != NULL) {
       frame_->Push(Factory::the_hole_value());
-      StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
     }
 
 
@@ -717,10 +717,10 @@
     Property property(&global, &key, RelocInfo::kNoPosition);
     Reference ref(this, &property);
     ref.GetValue();
-  } else if (variable != NULL && variable->slot() != NULL) {
+  } else if (variable != NULL && variable->AsSlot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -759,17 +759,17 @@
     frame_->Push(&result);
   }
 
-  Variable* arguments = scope()->arguments()->var();
-  Variable* shadow = scope()->arguments_shadow()->var();
-  ASSERT(arguments != NULL && arguments->slot() != NULL);
-  ASSERT(shadow != NULL && shadow->slot() != NULL);
+  Variable* arguments = scope()->arguments();
+  Variable* shadow = scope()->arguments_shadow();
+  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
   JumpTarget done;
   bool skip_arguments = false;
   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
     // We have to skip storing into the arguments slot if it has
     // already been written to. This can happen if a function
     // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
     Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
@@ -782,10 +782,10 @@
     }
   }
   if (!skip_arguments) {
-    StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
     if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
   }
-  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
   return frame_->Pop();
 }
 
@@ -842,7 +842,7 @@
       LoadGlobal();
       ref->set_type(Reference::NAMED);
     } else {
-      ASSERT(var->slot() != NULL);
+      ASSERT(var->AsSlot() != NULL);
       ref->set_type(Reference::SLOT);
     }
   } else {
@@ -2646,6 +2646,19 @@
 }
 
 
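+// Translate the codegen's comparison state into the flag set expected by
+// CompareStub. This codegen always inlines the smi case, so the stub's own
+// smi code is always excluded.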
+static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
+                                        bool inline_number_compare) {
+  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
+  if (nan_info == kCantBothBeNaN) {
+    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
+  }
+  if (inline_number_compare) {
+    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
+  }
+  return flags;
+}
+
+
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
@@ -2773,7 +2786,9 @@
 
       // Setup and call the compare stub.
       is_not_string.Bind(&left_side);
-      CompareStub stub(cc, strict, kCantBothBeNaN);
+      CompareFlags flags =
+          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
+      CompareStub stub(cc, strict, flags);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
       result.ToRegister();
       __ cmp(result.reg(), 0);
@@ -2867,7 +2882,8 @@
 
       // End of in-line compare, call out to the compare stub. Don't include
       // number comparison in the stub if it was inlined.
-      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
+      CompareStub stub(cc, strict, flags);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       __ test(answer.reg(), Operand(answer.reg()));
       answer.Unuse();
@@ -2900,7 +2916,9 @@
 
         // End of in-line compare, call out to the compare stub. Don't include
         // number comparison in the stub if it was inlined.
-        CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+        CompareFlags flags =
+            ComputeCompareFlags(nan_info, inline_number_compare);
+        CompareStub stub(cc, strict, flags);
         Result answer = frame_->CallStub(&stub, &left_side, &right_side);
         __ test(answer.reg(), Operand(answer.reg()));
         answer.Unuse();
@@ -2994,7 +3012,6 @@
         dest->false_target()->Branch(zero);
       } else {
         // Do the smi check, then the comparison.
-        JumpTarget is_not_smi;
         __ test(left_reg, Immediate(kSmiTagMask));
         is_smi.Branch(zero, left_side, right_side);
       }
@@ -3031,7 +3048,9 @@
       }
 
       // Setup and call the compare stub.
-      CompareStub stub(cc, strict, kCantBothBeNaN);
+      CompareFlags flags =
+          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
+      CompareStub stub(cc, strict, flags);
       Result result = frame_->CallStub(&stub, left_side, right_side);
       result.ToRegister();
       __ test(result.reg(), Operand(result.reg()));
@@ -3255,7 +3274,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -3517,7 +3536,7 @@
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
 
   // If it was not possible to allocate the variable at compile time,
   // we need to "declare" it at runtime to make sure it actually
@@ -4233,7 +4252,7 @@
   // the bottom check of the loop condition.
   if (node->is_fast_smi_loop()) {
     // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
   }
 
   Visit(node->body());
@@ -4259,7 +4278,7 @@
   // expression if we are in a fast smi loop condition.
   if (node->is_fast_smi_loop() && has_valid_frame()) {
     // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
   }
 
   // Based on the condition analysis, compile the backward jump as
@@ -4558,8 +4577,8 @@
 
   // Store the caught exception in the catch variable.
   Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
-  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
 
   // Remove the exception from the stack.
   frame_->Drop();
@@ -5154,7 +5173,7 @@
     done->Jump(result);
 
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -5187,7 +5206,7 @@
           Result arguments = allocator()->Allocate();
           ASSERT(arguments.is_valid());
           __ mov(arguments.reg(),
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                    arguments,
                                                    slow));
           frame_->Push(&arguments);
@@ -5695,7 +5714,7 @@
   Comment cmnt(masm(), "[ Variable Assignment");
   Variable* var = node->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   ASSERT(slot != NULL);
 
   // Evaluate the right-hand side.
@@ -6044,14 +6063,14 @@
     // in generated code. If we succeed, there is no need to perform a
     // context lookup in the runtime system.
     JumpTarget done;
-    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->slot()->type() == Slot::LOOKUP);
+    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
       JumpTarget slow;
       // Prepare the stack for the call to
       // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
       // function, the first argument to the eval call and the
       // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
                                                      NOT_INSIDE_TYPEOF,
                                                      &slow);
       frame_->Push(&fun);
@@ -6134,8 +6153,8 @@
     frame_->RestoreContextRegister();
     frame_->Push(&result);
 
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // ----------------------------------
     // JavaScript examples:
     //
@@ -6154,7 +6173,7 @@
     // Generate fast case for loading functions from slots that
     // correspond to local/global variables or arguments unless they
     // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &function,
                                     &slow,
@@ -8034,7 +8053,7 @@
 
     Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
     if (variable != NULL) {
-      Slot* slot = variable->slot();
+      Slot* slot = variable->AsSlot();
       if (variable->is_global()) {
         LoadGlobal();
         frame_->Push(variable->name());
@@ -8146,6 +8165,7 @@
           GenericUnaryOpStub stub(
               Token::SUB,
               overwrite,
+              NO_UNARY_FLAGS,
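+              // NO_UNARY_FLAGS: no smi fast case was inlined at this call
+              // site, so the stub keeps its own.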
               no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
           Result operand = frame_->Pop();
           Result answer = frame_->CallStub(&stub, &operand);
@@ -8173,7 +8193,9 @@
             __ test(operand.reg(), Immediate(kSmiTagMask));
             smi_label.Branch(zero, &operand, taken);
 
-            GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+            GenericUnaryOpStub stub(Token::BIT_NOT,
+                                    overwrite,
+                                    NO_UNARY_SMI_CODE_IN_STUB);
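+            // The smi case was handled inline above, so the stub omits it.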
             Result answer = frame_->CallStub(&stub, &operand);
             continue_label.Jump(&answer);
 
@@ -9122,9 +9144,15 @@
  public:
   DeferredReferenceGetNamedValue(Register dst,
                                  Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver),  name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                 Handle<String> name,
+                                 bool is_contextual)
+      : dst_(dst),
+        receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }
 
   virtual void Generate();
@@ -9136,6 +9164,7 @@
   Register dst_;
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
 };
 
 
@@ -9145,9 +9174,15 @@
   }
   __ Set(ecx, Immediate(name_));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test eax instruction to indicate
-  // that the inobject property case was inlined.
+  RelocInfo::Mode mode = is_contextual_
+      ? RelocInfo::CODE_TARGET_CONTEXT
+      : RelocInfo::CODE_TARGET;
+  __ call(ic, mode);
+  // The call must be followed by one of:
+  // - a test eax instruction to indicate that the inobject property
+  //   case was inlined.
+  // - a mov ecx instruction to indicate that the contextual property
+  //   load was inlined.
   //
   // Store the delta to the map check instruction here in the test
   // instruction.  Use masm_-> instead of the __ macro since the
@@ -9155,8 +9190,13 @@
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  if (is_contextual_) {
+    masm_->mov(ecx, -delta_to_patch_site);
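+    // The mov into ecx both marks the load as an inlined contextual load
+    // and carries the delta used for patching.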
+    __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  } else {
+    masm_->test(eax, Immediate(-delta_to_patch_site));
+    __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  }
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
@@ -9327,12 +9367,17 @@
 #ifdef DEBUG
   int original_height = frame()->height();
 #endif
+
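+  // Builtin (and bootstrapper) code is shared across contexts, so an
+  // inlined contextual load patched with one context's global cell would
+  // be unsafe there.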
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
   Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object.  Also do not inline for unoptimized code.  This saves time in
-  // the code generator.  Unoptimized code is toplevel code or code that is
-  // not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  // Do not inline in global code, outside a loop, or when compiling a
+  // contextual load in builtin code.
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     frame()->Push(name);
 
@@ -9345,19 +9390,26 @@
     // instruction here.
     __ nop();
   } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    // Inline the property load.
+    Comment cmnt(masm(), is_contextual
+                         ? "[ Inlined contextual property load"
+                         : "[ Inlined named property load");
     Result receiver = frame()->Pop();
     receiver.ToRegister();
 
     result = allocator()->Allocate();
     ASSERT(result.is_valid());
     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+        new DeferredReferenceGetNamedValue(result.reg(),
+                                           receiver.reg(),
+                                           name,
+                                           is_contextual);
 
-    // Check that the receiver is a heap object.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
+    if (!is_contextual) {
+      // Check that the receiver is a heap object.
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(zero);
+    }
 
     __ bind(deferred->patch_site());
     // This is the map check instruction that will be patched (so we can't
@@ -9369,17 +9421,33 @@
     // which allows the assert below to succeed and patching to work.
     deferred->Branch(not_equal);
 
-    // The delta from the patch label to the load offset must be statically
-    // known.
+    // The delta from the patch label to the actual load must be
+    // statically known.
     ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
            LoadIC::kOffsetToLoadInstruction);
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
 
-    __ IncrementCounter(&Counters::named_load_inline, 1);
+    if (is_contextual) {
+      // Load the (initially invalid) cell and get its value.
+      masm()->mov(result.reg(), Factory::null_value());
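+      // This instruction is patched later with the actual global property
+      // cell.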
+      if (FLAG_debug_code) {
+        __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
+               Factory::global_property_cell_map());
+        __ Assert(equal, "Uninitialized inlined contextual load");
+      }
+      __ mov(result.reg(),
+             FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
+      __ cmp(result.reg(), Factory::the_hole_value());
+      deferred->Branch(equal);
+      __ IncrementCounter(&Counters::named_load_global_inline, 1);
+    } else {
+      // The initial (invalid) offset has to be large enough to force a 32-bit
+      // instruction encoding to allow patching with an arbitrary offset.  Use
+      // kMaxInt (minus kHeapObjectTag).
+      int offset = kMaxInt;
+      masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+      __ IncrementCounter(&Counters::named_load_inline, 1);
+    }
+
     deferred->BindExit();
   }
   ASSERT(frame()->height() == original_height - 1);
@@ -9719,7 +9787,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       ASSERT(slot != NULL);
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       if (!persist_after_get_) set_unloaded();
@@ -9764,7 +9832,7 @@
     return;
   }
 
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
   ASSERT(slot != NULL);
   if (slot->type() == Slot::LOOKUP ||
       slot->type() == Slot::CONTEXT ||
@@ -9797,7 +9865,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
       set_unloaded();
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index adc0005..c4a03d1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -345,10 +345,6 @@
   bool in_spilled_code() const { return in_spilled_code_; }
   void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
 
-  // If the name is an inline runtime function call return the number of
-  // expected arguments. Otherwise return -1.
-  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
   // Return the operand for the element at |index_as_smi| + |additional_offset|
   // in the FixedArray pointed to by |array|.  |index_as_smi| is a Smi.
   static Operand FixedArrayElementOperand(Register array,
@@ -363,6 +359,12 @@
   }
 
  private:
+  // Type of a member function that generates inline code for a native function.
+  typedef void (CodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
@@ -624,13 +626,9 @@
 
   void CheckStack();
 
-  struct InlineRuntimeLUT {
-    void (CodeGenerator::*method)(ZoneList<Expression*>*);
-    const char* name;
-    int nargs;
-  };
+  static InlineFunctionGenerator FindInlineFunctionGenerator(
+      Runtime::FunctionId function_id);
 
-  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
 
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -792,8 +790,6 @@
   // in a spilled state.
   bool in_spilled_code_;
 
-  static InlineRuntimeLUT kInlineRuntimeLUT[];
-
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 64305ef..52c2b38 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -685,7 +685,8 @@
 
     case 0xDD: switch (regop) {
         case 0: mnem = "fld_d"; break;
-        case 2: mnem = "fstp"; break;
+        case 1: mnem = "fisttp_d"; break;
+        case 2: mnem = "fst_d"; break;
         case 3: mnem = "fstp_d"; break;
         default: UnimplementedInstruction();
       }
@@ -717,6 +718,10 @@
 
     case 0xD9:
       switch (modrm_byte & 0xF8) {
+        case 0xC0:
+          mnem = "fld";
+          has_register = true;
+          break;
         case 0xC8:
           mnem = "fxch";
           has_register = true;
@@ -957,6 +962,14 @@
           } else if (f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
+          } else if (f0byte == 0x28) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movaps %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if ((f0byte & 0xF0) == 0x80) {
             data += JumpConditional(data, branch_hint);
           } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1156,6 +1169,23 @@
                             NameOfXMMRegister(regop),
                             NameOfXMMRegister(rm));
              data++;
+          } else if (*data == 0x73) {
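+             // 66 0F 73 /6 ib is psllq xmm,imm8; the /6 opcode extension in
+             // regop is assumed rather than checked.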
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             int8_t imm8 = static_cast<int8_t>(data[1]);
+             AppendToBuffer("psllq %s,%d",
+                            NameOfXMMRegister(rm),
+                            static_cast<int>(imm8));
+             data += 2;
+          } else if (*data == 0x54) {
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             AppendToBuffer("andpd %s,%s",
+                            NameOfXMMRegister(regop),
+                            NameOfXMMRegister(rm));
+             data++;
           } else {
             UnimplementedInstruction();
           }
@@ -1168,12 +1198,12 @@
         { data++;
           int mod, regop, rm;
           get_modrm(*data, &mod, &regop, &rm);
-          if (mod == 3 && regop == ecx) {
-            AppendToBuffer("dec_b %s", NameOfCPURegister(rm));
+          if (regop == ecx) {
+            AppendToBuffer("dec_b ");
+            data += PrintRightOperand(data);
           } else {
             UnimplementedInstruction();
           }
-          data++;
         }
         break;
 
@@ -1274,6 +1304,23 @@
                                NameOfXMMRegister(rm));
                 data++;
               }
+            } else if (b2 == 0xC2) {
+              // Intel manual 2A, Table 3-18.
+              const char* const pseudo_op[] = {
+                "cmpeqsd",
+                "cmpltsd",
+                "cmplesd",
+                "cmpunordsd",
+                "cmpneqsd",
+                "cmpnltsd",
+                "cmpnlesd",
+                "cmpordsd"
+              };
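+              // The immediate byte after the operands selects the predicate.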
+              AppendToBuffer("%s %s,%s",
+                             pseudo_op[data[1]],
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data += 2;
             } else {
               if (mod != 0x3) {
                 AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
@@ -1367,7 +1414,7 @@
                                      " %s",
                                      tmp_buffer_.start());
   return instr_len;
-}
+}  // NOLINT (function is too long)
 
 
 //------------------------------------------------------------------------------
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 9baf763..dd44f0e 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -35,16 +35,8 @@
 namespace internal {
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute the stack pointer.
-  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-  // Fill in the state.
-  state->fp = fp;
-  state->sp = sp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
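+  // Exit frames record sp explicitly; the shared frame code now derives the
+  // rest of the frame state from it.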
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3d1653c..cf53f4b 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -100,7 +100,7 @@
     // Copy parameters into context if necessary.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->slot();
+      Slot* slot = scope()->parameter(i)->AsSlot();
       if (slot != NULL && slot->type() == Slot::CONTEXT) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
@@ -118,7 +118,7 @@
     }
   }
 
-  Variable* arguments = scope()->arguments()->AsVariable();
+  Variable* arguments = scope()->arguments();
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
@@ -140,9 +140,8 @@
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     __ CallStub(&stub);
     __ mov(ecx, eax);  // Duplicate result.
-    Move(arguments->slot(), eax, ebx, edx);
-    Slot* dot_arguments_slot =
-        scope()->arguments_shadow()->AsVariable()->slot();
+    Move(arguments->AsSlot(), eax, ebx, edx);
+    Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
     Move(dot_arguments_slot, ecx, ebx, edx);
   }
 
@@ -162,7 +161,7 @@
   }
 
   { Comment cmnt(masm_, "[ Stack check");
-    Label ok;
+    NearLabel ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit();
     __ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -239,226 +238,191 @@
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-
-    case Expression::kValue:
-      // Move value into place.
-      switch (location_) {
-        case kAccumulator:
-          if (!reg.is(result_register())) __ mov(result_register(), reg);
-          break;
-        case kStack:
-          __ push(reg);
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      if (!reg.is(result_register())) __ mov(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
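+  // Nothing to do: loading a slot for effect only has no side effects.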
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-    case Expression::kValue: {
-      MemOperand slot_operand = EmitSlotSearch(slot, result_register());
-      switch (location_) {
-        case kAccumulator:
-          __ mov(result_register(), slot_operand);
-          break;
-        case kStack:
-          // Memory operands can be pushed directly.
-          __ push(slot_operand);
-          break;
-      }
-      break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+  __ mov(result_register(), slot_operand);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+  // Memory operands can be pushed directly.
+  __ push(slot_operand);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+  // For simplicity we always test the accumulator register.
+  codegen()->Move(result_register(), slot);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+  UNREACHABLE();  // Not used on IA32.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  UNREACHABLE();  // Not used on IA32.
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  UNREACHABLE();  // Not used on IA32.
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  UNREACHABLE();  // Not used on IA32.
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ mov(result_register(), lit);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates can be pushed directly.
+  __ push(Immediate(lit));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
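+  // Jump directly when the literal's boolean value is statically known.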
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    __ jmp(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    __ jmp(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      __ jmp(false_label_);
+    } else {
+      __ jmp(true_label_);
     }
-
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      Move(result_register(), slot);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      __ jmp(false_label_);
+    } else {
+      __ jmp(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ mov(result_register(), lit);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
   }
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ mov(result_register(), lit->handle());
-          break;
-        case kStack:
-          // Immediates can be pushed directly.
-          __ push(Immediate(lit->handle()));
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      __ mov(result_register(), lit->handle());
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(1);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ pop(result_register());
-          break;
-        case kStack:
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      __ pop(result_register());
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::DropAndApply(int count,
-                                     Expression::Context context,
-                                     Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
   ASSERT(count > 0);
-  ASSERT(!reg.is(esp));
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(count);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ Drop(count);
-          if (!reg.is(result_register())) __ mov(result_register(), reg);
-          break;
-        case kStack:
-          if (count > 1) __ Drop(count - 1);
-          __ mov(Operand(esp, 0), reg);
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      __ Drop(count);
-      if (!reg.is(result_register())) __ mov(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
+  __ Drop(count);
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context,
-                              Label* materialize_true,
-                              Label* materialize_false) {
-  switch (context) {
-    case Expression::kUninitialized:
-
-    case Expression::kEffect:
-      ASSERT_EQ(materialize_true, materialize_false);
-      __ bind(materialize_true);
-      break;
-
-    case Expression::kValue: {
-      Label done;
-      switch (location_) {
-        case kAccumulator:
-          __ bind(materialize_true);
-          __ mov(result_register(), Factory::true_value());
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ mov(result_register(), Factory::false_value());
-          break;
-        case kStack:
-          __ bind(materialize_true);
-          __ push(Immediate(Factory::true_value()));
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ push(Immediate(Factory::false_value()));
-          break;
-      }
-      __ bind(&done);
-      break;
-    }
-
-    case Expression::kTest:
-      break;
-  }
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      break;
-    case Expression::kValue: {
-      Handle<Object> value =
-          flag ? Factory::true_value() : Factory::false_value();
-      switch (location_) {
-        case kAccumulator:
-          __ mov(result_register(), value);
-          break;
-        case kStack:
-          __ push(Immediate(value));
-          break;
-      }
-      break;
-    }
-    case Expression::kTest:
-      if (flag) {
-        if (true_label_ != fall_through_) __ jmp(true_label_);
-      } else {
-        if (false_label_ != fall_through_) __ jmp(false_label_);
-      }
-      break;
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ mov(Operand(esp, 0), reg);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  ASSERT_EQ(materialize_true, materialize_false);
+  __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  NearLabel done;
+  __ bind(materialize_true);
+  __ mov(result_register(), Factory::true_value());
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ mov(result_register(), Factory::false_value());
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  NearLabel done;
+  __ bind(materialize_true);
+  __ push(Immediate(Factory::true_value()));
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ push(Immediate(Factory::false_value()));
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  ASSERT(materialize_false == false_label_);
+  ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Handle<Object> value =
+      flag ? Factory::true_value() : Factory::false_value();
+  __ mov(result_register(), value);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Handle<Object> value =
+      flag ? Factory::true_value() : Factory::false_value();
+  __ push(Immediate(value));
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  if (flag) {
+    if (true_label_ != fall_through_) __ jmp(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   }
 }
 
@@ -551,7 +515,7 @@
                                         FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->slot();
+  Slot* slot = variable->AsSlot();
   Property* prop = variable->AsProperty();
   if (slot != NULL) {
     switch (slot->type()) {
@@ -561,7 +525,7 @@
           __ mov(Operand(ebp, SlotOffset(slot)),
                  Immediate(Factory::the_hole_value()));
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ mov(Operand(ebp, SlotOffset(slot)), result_register());
         }
         break;
@@ -583,7 +547,7 @@
                  Immediate(Factory::the_hole_value()));
           // No write barrier since the hole value is in old space.
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ mov(ContextOperand(esi, slot->index()), result_register());
           int offset = Context::SlotOffset(slot->index());
           __ mov(ebx, esi);
@@ -605,7 +569,7 @@
         if (mode == Variable::CONST) {
           __ push(Immediate(Factory::the_hole_value()));
         } else if (function != NULL) {
-          VisitForValue(function, kStack);
+          VisitForStackValue(function);
         } else {
           __ push(Immediate(Smi::FromInt(0)));  // No initial value!
         }
@@ -618,23 +582,20 @@
     if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       if (function != NULL) {
-        VisitForValue(prop->key(), kStack);
-        VisitForValue(function, kAccumulator);
+        VisitForStackValue(prop->key());
+        VisitForAccumulatorValue(function);
         __ pop(ecx);
       } else {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         __ mov(ecx, result_register());
         __ mov(result_register(), Factory::the_hole_value());
       }
       __ pop(edx);
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // Absence of a test eax instruction following the call
-      // indicates that none of the load was inlined.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
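+      // EmitCallIC centralizes the call and the post-call bookkeeping that
+      // was previously emitted by hand at each call site.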
     }
   }
 }
@@ -660,7 +621,7 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
   // Keep the switch value on the stack until a case matches.
-  VisitForValue(stmt->tag(), kStack);
+  VisitForStackValue(stmt->tag());
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -680,12 +641,13 @@
     next_test.Unuse();
 
     // Compile the label expression.
-    VisitForValue(clause->label(), kAccumulator);
+    VisitForAccumulatorValue(clause->label());
 
     // Perform the comparison as if via '==='.
     __ mov(edx, Operand(esp, 0));  // Switch value.
-    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
-      Label slow_case;
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    if (inline_smi_code) {
+      NearLabel slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
       __ test(ecx, Immediate(kSmiTagMask));
@@ -697,7 +659,10 @@
       __ bind(&slow_case);
     }
 
-    CompareStub stub(equal, true);
+    CompareFlags flags = inline_smi_code
+        ? NO_SMI_COMPARE_IN_STUB
+        : NO_COMPARE_FLAGS;
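+    // The stub can skip its smi code when the fast case was emitted inline.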
+    CompareStub stub(equal, true, flags);
     __ CallStub(&stub);
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
@@ -738,14 +703,14 @@
   // Get the object to enumerate over. Both SpiderMonkey and JSC
   // ignore null and undefined in contrast to the specification; see
   // ECMA-262 section 12.6.4.
-  VisitForValue(stmt->enumerable(), kAccumulator);
+  VisitForAccumulatorValue(stmt->enumerable());
   __ cmp(eax, Factory::undefined_value());
   __ j(equal, &exit);
   __ cmp(eax, Factory::null_value());
   __ j(equal, &exit);
 
   // Convert the object to a JS object.
-  Label convert, done_convert;
+  NearLabel convert, done_convert;
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &convert);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
@@ -786,7 +751,7 @@
   __ j(zero, &call_runtime);
 
   // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
+  NearLabel check_prototype;
   __ cmp(ecx, Operand(eax));
   __ j(equal, &check_prototype);
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -801,7 +766,7 @@
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
-  Label use_cache;
+  NearLabel use_cache;
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
   __ jmp(&use_cache);
 
@@ -813,7 +778,7 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  Label fixed_array;
+  NearLabel fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
   __ j(not_equal, &fixed_array);
 
@@ -855,7 +820,7 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  Label update_each;
+  NearLabel update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each);
@@ -878,7 +843,8 @@
   EmitAssignment(stmt->each());
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit, stack_check_done;
+  Label stack_limit_hit;
+  NearLabel stack_check_done;
   Visit(stmt->body());
 
   __ StackLimitCheck(&stack_limit_hit);
@@ -918,13 +884,13 @@
     __ push(Immediate(info));
     __ CallRuntime(Runtime::kNewClosure, 2);
   }
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var(), context_);
+  EmitVariableLoad(expr->var());
 }
 
 
@@ -960,7 +926,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    Label next, fast;
+    NearLabel next, fast;
     if (!context.is(temp)) {
       __ mov(temp, context);
     }
@@ -987,7 +953,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
 }
 
 
@@ -1034,7 +1000,7 @@
     EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
     __ jmp(done);
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -1060,11 +1026,11 @@
           // variables. Then load the argument from the arguments
           // object using keyed load.
           __ mov(edx,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                    slow));
           __ mov(eax, Immediate(key_literal->handle()));
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1073,12 +1039,11 @@
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
-                                         Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
   // Four cases: non-this global variables, lookup slots, all other
   // types of slots, and parameters that rewrite to explicit property
   // accesses on the arguments object.
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   Property* property = var->AsProperty();
 
   if (var->is_global() && !var->is_this()) {
@@ -1088,13 +1053,8 @@
     __ mov(eax, CodeGenerator::GlobalObject());
     __ mov(ecx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    // By emitting a nop we make sure that we do not have a test eax
-    // instruction after the call it is treated specially by the LoadIC code
-    // Remember that the assembler may choose to do peephole optimization
-    // (eg, push/pop elimination).
-    __ nop();
-    Apply(context, eax);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    context()->Plug(eax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Label done, slow;
@@ -1110,7 +1070,7 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ bind(&done);
 
-    Apply(context, eax);
+    context()->Plug(eax);
 
   } else if (slot != NULL) {
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
@@ -1119,16 +1079,16 @@
     if (var->mode() == Variable::CONST) {
       // Constants may be the hole value if they have not been initialized.
       // Unhole them.
-      Label done;
+      NearLabel done;
       MemOperand slot_operand = EmitSlotSearch(slot, eax);
       __ mov(eax, slot_operand);
       __ cmp(eax, Factory::the_hole_value());
       __ j(not_equal, &done);
       __ mov(eax, Factory::undefined_value());
       __ bind(&done);
-      Apply(context, eax);
+      context()->Plug(eax);
     } else {
-      Apply(context, slot);
+      context()->Plug(slot);
     }
 
   } else {
@@ -1139,7 +1099,7 @@
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
     ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->slot();
+    Slot* object_slot = object_var->AsSlot();
     ASSERT_NOT_NULL(object_slot);
 
     // Load the object.
@@ -1156,19 +1116,17 @@
 
     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    // Notice: We must not have a "test eax, ..." instruction after the
-    // call. It is treated specially by the LoadIC code.
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+
     // Drop key and object left on the stack by IC.
-    Apply(context, eax);
+    context()->Plug(eax);
   }
 }
 
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
-  Label materialized;
+  NearLabel materialized;
   // Registers will be used as follows:
   // edi = JS function.
   // ecx = literals array.
@@ -1216,7 +1174,7 @@
     __ mov(edx, FieldOperand(ebx, size - kPointerSize));
     __ mov(FieldOperand(eax, size - kPointerSize), edx);
   }
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -1253,29 +1211,28 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForValue(value, kAccumulator);
+          VisitForAccumulatorValue(value);
           __ mov(ecx, Immediate(key->handle()));
           __ mov(edx, Operand(esp, 0));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
-          __ nop();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        VisitForValue(key, kStack);
-        VisitForValue(value, kStack);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kSetProperty, 3);
         break;
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
         __ push(Operand(esp, 0));  // Duplicate receiver.
-        VisitForValue(key, kStack);
+        VisitForStackValue(key);
         __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
                           Smi::FromInt(1) :
                           Smi::FromInt(0)));
-        VisitForValue(value, kStack);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
       default: UNREACHABLE();
@@ -1283,9 +1240,9 @@
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, eax);
+    context()->Plug(eax);
   }
 }
 
@@ -1332,7 +1289,7 @@
       __ push(eax);
       result_saved = true;
     }
-    VisitForValue(subexpr, kAccumulator);
+    VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
     __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
@@ -1345,9 +1302,9 @@
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, eax);
+    context()->Plug(eax);
   }
 }
 
@@ -1380,39 +1337,38 @@
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
         // We need the receiver both on the stack and in the accumulator.
-        VisitForValue(property->obj(), kAccumulator);
+        VisitForAccumulatorValue(property->obj());
         __ push(result_register());
       } else {
-        VisitForValue(property->obj(), kStack);
+        VisitForStackValue(property->obj());
       }
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kAccumulator);
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
       } else {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kStack);
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
   }
 
   if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    switch (assign_type) {
-      case VARIABLE:
-        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
-                         Expression::kValue);
-        break;
-      case NAMED_PROPERTY:
-        EmitNamedPropertyLoad(property);
-        break;
-      case KEYED_PROPERTY:
-        EmitKeyedPropertyLoad(property);
-        break;
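+    // AccumulatorValueContext is scoped: its constructor installs it as the
+    // current expression context and its destructor restores the previous one.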
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          break;
+      }
     }
 
     Token::Value op = expr->binary_op();
@@ -1422,28 +1378,26 @@
     ASSERT(constant == kRightConstant || constant == kNoConstants);
     if (constant == kNoConstants) {
       __ push(eax);  // Left operand goes on the stack.
-      VisitForValue(expr->value(), kAccumulator);
+      VisitForAccumulatorValue(expr->value());
     }
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
         ? OVERWRITE_RIGHT
         : NO_OVERWRITE;
     SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
       EmitInlineSmiBinaryOp(expr,
                             op,
-                            Expression::kValue,
                             mode,
                             expr->target(),
                             expr->value(),
                             constant);
     } else {
-      EmitBinaryOp(op, Expression::kValue, mode);
+      EmitBinaryOp(op, mode);
     }
-    location_ = saved_location;
-
   } else {
-    VisitForValue(expr->value(), kAccumulator);
+    VisitForAccumulatorValue(expr->value());
   }
 
   // Record source position before possible IC call.
@@ -1453,8 +1407,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(),
-                             context_);
+                             expr->op());
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1471,25 +1424,23 @@
   Literal* key = prop->key()->AsLiteral();
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
-                                           Expression::Context context,
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  Label call_stub, done;
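+  // call_stub is close enough for a short jump; done is bound after the stub
+  // call and may be out of short-jump range, so it stays a regular Label.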
+  NearLabel call_stub;
+  Label done;
   __ add(Operand(eax), Immediate(value));
   __ j(overflow, &call_stub);
   __ test(eax, Immediate(kSmiTagMask));
@@ -1509,12 +1460,11 @@
   }
   __ CallStub(&stub);
   __ bind(&done);
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
-                                           Expression::Context context,
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
@@ -1546,13 +1496,12 @@
   GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
   __ CallStub(&stub);
   __ bind(&done);
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
                                                Token::Value op,
-                                               Expression::Context context,
                                                OverwriteMode mode,
                                                Smi* value) {
   Label call_stub, smi_case, done;
@@ -1609,13 +1558,12 @@
   }
 
   __ bind(&done);
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
                                              Token::Value op,
-                                             Expression::Context context,
                                              OverwriteMode mode,
                                              Smi* value) {
   Label smi_case, done;
@@ -1646,13 +1594,12 @@
   }
 
   __ bind(&done);
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
                                                 Token::Value op,
-                                                Expression::Context context,
                                                 OverwriteMode mode,
                                                 bool left_is_constant_smi,
                                                 Smi* value) {
@@ -1660,19 +1607,19 @@
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND:
-      EmitConstantSmiBitOp(expr, op, context, mode, value);
+      EmitConstantSmiBitOp(expr, op, mode, value);
       break;
     case Token::SHL:
     case Token::SAR:
     case Token::SHR:
       ASSERT(!left_is_constant_smi);
-      EmitConstantSmiShiftOp(expr, op, context, mode, value);
+      EmitConstantSmiShiftOp(expr, op, mode, value);
       break;
     case Token::ADD:
-      EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value);
+      EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
       break;
     case Token::SUB:
-      EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value);
+      EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
       break;
     default:
       UNREACHABLE();
@@ -1682,18 +1629,17 @@
 
 void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                               Token::Value op,
-                                              Expression::Context context,
                                               OverwriteMode mode,
                                               Expression* left,
                                               Expression* right,
                                               ConstantOperand constant) {
   if (constant == kRightConstant) {
     Smi* value =  Smi::cast(*right->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, context, mode, false, value);
+    EmitConstantSmiBinaryOp(expr, op, mode, false, value);
     return;
   } else if (constant == kLeftConstant) {
     Smi* value =  Smi::cast(*left->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, context, mode, true, value);
+    EmitConstantSmiBinaryOp(expr, op, mode, true, value);
     return;
   }
 
@@ -1787,12 +1733,11 @@
   }
 
   __ bind(&done);
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
-                                     Expression::Context context,
                                      OverwriteMode mode) {
   TypeInfo type = TypeInfo::Unknown();
   GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
@@ -1803,7 +1748,7 @@
     __ push(result_register());
     __ CallStub(&stub);
   }
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
@@ -1829,30 +1774,29 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* var = expr->AsVariableProxy()->var();
-      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
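+      // EffectContext discards the result: the trailing Plug(eax) in
+      // EmitVariableAssignment becomes a no-op.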
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
       break;
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
       __ pop(edx);
       __ pop(eax);  // Restore value.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1860,12 +1804,11 @@
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
-                                               Token::Value op,
-                                               Expression::Context context) {
+                                               Token::Value op) {
   // Left-hand sides that rewrite to explicit property accesses do not reach
   // here.
   ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->slot() != NULL);
+  ASSERT(var->is_global() || var->AsSlot() != NULL);
 
   if (var->is_global()) {
     ASSERT(!var->is_this());
@@ -1875,14 +1818,13 @@
     __ mov(ecx, var->name());
     __ mov(edx, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
     // of const variables.  Const assignments are simply skipped.
     Label done;
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
@@ -1931,7 +1873,7 @@
     __ bind(&done);
   }
 
-  Apply(context, eax);
+  context()->Plug(eax);
 }
 
 
@@ -1960,8 +1902,7 @@
     __ pop(edx);
   }
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1969,9 +1910,9 @@
     __ push(Operand(esp, kPointerSize));  // Receiver is under value.
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    DropAndApply(1, context_, eax);
+    context()->DropAndPlug(1, eax);
   } else {
-    Apply(context_, eax);
+    context()->Plug(eax);
   }
 }
 
@@ -1999,10 +1940,7 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // This nop signals to the IC that there is no inlined code at the call
-  // site for it to patch.
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2013,7 +1951,7 @@
     __ pop(eax);
   }
 
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2022,16 +1960,15 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForValue(expr->obj(), kAccumulator);
+    VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
-    Apply(context_, eax);
   } else {
-    VisitForValue(expr->obj(), kStack);
-    VisitForValue(expr->key(), kAccumulator);
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
     __ pop(edx);
     EmitKeyedPropertyLoad(expr);
-    Apply(context_, eax);
   }
+  context()->Plug(eax);
 }
 
 
@@ -2042,17 +1979,17 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   __ Set(ecx, Immediate(name));
   // Record source position of the IC call.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2063,19 +2000,19 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
-  VisitForValue(key, kAccumulator);
+  VisitForAccumulatorValue(key);
   __ mov(ecx, eax);
   // Record source position of the IC call.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
       arg_count, in_loop);
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2084,7 +2021,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
@@ -2093,7 +2030,7 @@
   __ CallStub(&stub);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  DropAndApply(1, context_, eax);
+  context()->DropAndPlug(1, eax);
 }
 
 
@@ -2107,14 +2044,14 @@
     // resolve the function we need to call and the receiver of the
     // call.  Then we call the resolved function using the given
     // arguments.
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     __ push(Immediate(Factory::undefined_value()));  // Reserved receiver slot.
 
     // Push the arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
-      VisitForValue(args->at(i), kStack);
+      VisitForStackValue(args->at(i));
     }
 
     // Push copy of the function - found below the arguments.
@@ -2143,19 +2080,19 @@
     __ CallStub(&stub);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    DropAndApply(1, context_, eax);
+    context()->DropAndPlug(1, eax);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ push(CodeGenerator::GlobalObject());
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &slow,
                                     &done);
@@ -2191,25 +2128,21 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed CallIC.
-      VisitForValue(prop->obj(), kStack);
+      // for a regular property use a keyed call IC (via EmitCallIC).
+      VisitForStackValue(prop->obj());
       if (prop->is_synthetic()) {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         // Record source code position for IC call.
         SetSourcePosition(prop->position());
         __ pop(edx);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ call(ic, RelocInfo::CODE_TARGET);
-        // By emitting a nop we make sure that we do not have a "test eax,..."
-        // instruction after the call as it is treated specially
-        // by the LoadIC code.
-        __ nop();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         // Push result (function).
         __ push(eax);
         // Push Global receiver.
@@ -2230,7 +2163,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     // Load global receiver object.
     __ mov(ebx, CodeGenerator::GlobalObject());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -2249,13 +2182,13 @@
   // Push constructor on the stack.  If it's not a function it's used as
   // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
   // ignored.
-  VisitForValue(expr->expression(), kStack);
+  VisitForStackValue(expr->expression());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   // Call the construct call builtin that handles allocation and
@@ -2268,59 +2201,59 @@
 
   Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
   __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
@@ -2337,42 +2270,42 @@
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
   Split(below_equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
   Split(above_equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
@@ -2381,7 +2314,7 @@
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
   Split(not_zero, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2389,83 +2322,83 @@
     ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
   // used in a few functions in runtime.js which should not normally be hit by
   // this compiler.
   __ jmp(if_false);
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2477,8 +2410,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Get the frame pointer for the calling frame.
   __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2496,7 +2429,7 @@
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2504,21 +2437,21 @@
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
   __ cmp(eax, Operand(ebx));
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2527,12 +2460,12 @@
 
   // ArgumentsAccessStub expects the key in edx and the formal
   // parameter count in eax.
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2555,7 +2488,7 @@
 
   __ bind(&exit);
   if (FLAG_debug_code) __ AbortIfNotSmi(eax);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2563,7 +2496,7 @@
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
   __ test(eax, Immediate(kSmiTagMask));
@@ -2610,7 +2543,7 @@
   // All done.
   __ bind(&done);
 
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2625,14 +2558,14 @@
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
-    VisitForValue(args->at(1), kStack);
-    VisitForValue(args->at(2), kStack);
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, Factory::undefined_value());
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2680,7 +2613,7 @@
     __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
   }
   __ mov(eax, edi);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2688,11 +2621,11 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2700,21 +2633,21 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
   ASSERT(args->length() == 4);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
-  VisitForValue(args->at(3), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
 
-  Label done;
+  NearLabel done;
   // If the object is a smi return the object.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &done);
@@ -2724,28 +2657,28 @@
   __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
 
   __ bind(&done);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
   ASSERT(args->length() == 2);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
   __ CallRuntime(Runtime::kMath_pow, 2);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);  // Load the object.
-  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
 
-  Label done;
+  NearLabel done;
   // If the object is a smi, return the value.
   __ test(ebx, Immediate(kSmiTagMask));
   __ j(zero, &done);
@@ -2762,7 +2695,7 @@
   __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
 
   __ bind(&done);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2770,18 +2703,18 @@
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label done;
   StringCharFromCodeGenerator generator(eax, ebx);
@@ -2792,15 +2725,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, ebx);
+  context()->Plug(ebx);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = ebx;
   Register index = eax;
@@ -2839,15 +2772,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = ebx;
   Register index = eax;
@@ -2888,31 +2821,31 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2920,9 +2853,9 @@
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN);
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2930,18 +2863,18 @@
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS);
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2949,38 +2882,38 @@
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForValue(args->at(0), kStack);  // Receiver.
+  VisitForStackValue(args->at(0));  // Receiver.
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i + 1), kStack);
+    VisitForStackValue(args->at(i + 1));
   }
-  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
 
   // InvokeFunction requires function in edi. Move it in there.
   if (!result_register().is(edi)) __ mov(edi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(edi, count, CALL_FUNCTION);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kSwapElements, 3);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -2995,11 +2928,11 @@
   if (jsfunction_result_caches->length() <= cache_id) {
     __ Abort("Attempt to use undefined cache.");
     __ mov(eax, Factory::undefined_value());
-    Apply(context_, eax);
+    context()->Plug(eax);
     return;
   }
 
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForAccumulatorValue(args->at(1));
 
   Register key = eax;
   Register cache = ebx;
@@ -3028,7 +2961,7 @@
   __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -3039,8 +2972,8 @@
   Register left = ebx;
   Register tmp = ecx;
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
   __ pop(left);
 
   Label done, fail, ok;
@@ -3065,14 +2998,14 @@
   __ mov(eax, Immediate(Factory::true_value()));
   __ bind(&done);
 
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   if (FLAG_debug_code) {
     __ AbortIfNotString(eax);
@@ -3082,21 +3015,21 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
   Split(zero, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   if (FLAG_debug_code) {
     __ AbortIfNotString(eax);
@@ -3105,7 +3038,7 @@
   __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
   __ IndexFromHash(eax, eax);
 
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -3129,7 +3062,7 @@
   // Push the arguments ("left-to-right").
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   if (expr->is_jsruntime()) {
@@ -3137,14 +3070,14 @@
     __ Set(ecx, Immediate(expr->name()));
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-    __ call(ic, RelocInfo::CODE_TARGET);
-      // Restore context register.
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
@@ -3158,20 +3091,20 @@
         // Result of deleting non-property, non-variable reference is true.
         // The subexpression may have side effects.
         VisitForEffect(expr->expression());
-        Apply(context_, true);
+        context()->Plug(true);
       } else if (var != NULL &&
                  !var->is_global() &&
-                 var->slot() != NULL &&
-                 var->slot()->type() != Slot::LOOKUP) {
+                 var->AsSlot() != NULL &&
+                 var->AsSlot()->type() != Slot::LOOKUP) {
         // Result of deleting non-global, non-dynamic variables is false.
         // The subexpression does not have side effects.
-        Apply(context_, false);
+        context()->Plug(false);
       } else {
         // Property or variable reference.  Call the delete builtin with
         // object and property name as arguments.
         if (prop != NULL) {
-          VisitForValue(prop->obj(), kStack);
-          VisitForValue(prop->key(), kStack);
+          VisitForStackValue(prop->obj());
+          VisitForStackValue(prop->key());
         } else if (var->is_global()) {
           __ push(CodeGenerator::GlobalObject());
           __ push(Immediate(var->name()));
@@ -3185,7 +3118,7 @@
           __ push(Immediate(var->name()));
         }
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        Apply(context_, eax);
+        context()->Plug(eax);
       }
       break;
     }
@@ -3193,26 +3126,7 @@
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          break;
-        case Expression::kValue:
-          switch (location_) {
-            case kAccumulator:
-              __ mov(result_register(), Factory::undefined_value());
-              break;
-            case kStack:
-              __ push(Immediate(Factory::undefined_value()));
-              break;
-          }
-          break;
-        case Expression::kTest:
-          __ jmp(false_label_);
-          break;
-      }
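+      // Plug(Handle<Object>) discards, loads, pushes, or branches on the
+      // constant as the current expression context requires.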
+      context()->Plug(Factory::undefined_value());
       break;
     }
 
@@ -3224,31 +3138,33 @@
       Label* if_false = NULL;
       Label* fall_through = NULL;
       // Notice that the labels are swapped.
-      PrepareTest(&materialize_true, &materialize_false,
-                  &if_false, &if_true, &fall_through);
+      context()->PrepareTest(&materialize_true, &materialize_false,
+                             &if_false, &if_true, &fall_through);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
-      Apply(context_, if_false, if_true);  // Labels swapped.
+      context()->Plug(if_false, if_true);  // Labels swapped.
       break;
     }
 
     case Token::TYPEOF: {
       Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
-      VisitForTypeofValue(expr->expression(), kStack);
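+      // Materialize the operand on the stack; it is the sole argument to
+      // the Runtime::kTypeof call below.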
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
       __ CallRuntime(Runtime::kTypeof, 1);
-      Apply(context_, eax);
+      context()->Plug(eax);
       break;
     }
 
     case Token::ADD: {
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       Label no_conversion;
       __ test(result_register(), Immediate(kSmiTagMask));
       __ j(zero, &no_conversion);
       __ push(result_register());
       __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
       __ bind(&no_conversion);
-      Apply(context_, result_register());
+      context()->Plug(result_register());
       break;
     }
 
@@ -3257,12 +3173,12 @@
       bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
       UnaryOverwriteMode overwrite =
           can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite);
+      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
       // GenericUnaryOpStub expects the argument to be in the
       // accumulator register eax.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       __ CallStub(&stub);
-      Apply(context_, eax);
+      context()->Plug(eax);
       break;
     }
 
@@ -3270,10 +3186,11 @@
       Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
       // The generic unary operation stub expects the argument to be
       // in the accumulator register eax.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       Label done;
-      if (ShouldInlineSmiCase(expr->op())) {
-        Label call_stub;
+      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
+      if (inline_smi_case) {
+        NearLabel call_stub;
         __ test(eax, Immediate(kSmiTagMask));
         __ j(not_zero, &call_stub);
         __ lea(eax, Operand(eax, kSmiTagMask));
@@ -3284,10 +3201,13 @@
       bool overwrite = expr->expression()->ResultOverwriteAllowed();
       UnaryOverwriteMode mode =
           overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode);
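+      // When the smi case was inlined above, tell the stub to omit its own
+      // smi fast path.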
+      UnaryOpFlags flags = inline_smi_case
+          ? NO_UNARY_SMI_CODE_IN_STUB
+          : NO_UNARY_FLAGS;
+      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
       __ CallStub(&stub);
       __ bind(&done);
-      Apply(context_, eax);
+      context()->Plug(eax);
       break;
     }
 
@@ -3323,24 +3243,21 @@
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
-                     Expression::kValue);
-    location_ = saved_location;
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
   } else {
     // Reserve space for result of postfix operation.
-    if (expr->is_postfix() && context_ != Expression::kEffect) {
+    if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
       EmitKeyedPropertyLoad(prop);
@@ -3348,7 +3265,7 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  Label no_conversion;
+  NearLabel no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
     __ test(eax, Immediate(kSmiTagMask));
     __ j(zero, &no_conversion);
@@ -3359,34 +3276,27 @@
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        // Do not save result.
-        break;
-      case Expression::kValue:
-      case Expression::kTest:
-        // Save the result on the stack. If we have a named or keyed property
-        // we store the result under the receiver that is currently on top
-        // of the stack.
-        switch (assign_type) {
-          case VARIABLE:
-            __ push(eax);
-            break;
-          case NAMED_PROPERTY:
-            __ mov(Operand(esp, kPointerSize), eax);
-            break;
-          case KEYED_PROPERTY:
-            __ mov(Operand(esp, 2 * kPointerSize), eax);
-            break;
-        }
-        break;
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property,
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(eax);
+          break;
+        case NAMED_PROPERTY:
+          __ mov(Operand(esp, kPointerSize), eax);
+          break;
+        case KEYED_PROPERTY:
+          __ mov(Operand(esp, 2 * kPointerSize), eax);
+          break;
+      }
     }
   }
 
   // Inline smi case if we are in a loop.
-  Label stub_call, done;
+  NearLabel stub_call;
+  Label done;
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3419,35 +3329,32 @@
     case VARIABLE:
       if (expr->is_postfix()) {
         // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               Expression::kEffect);
-        // For all contexts except kEffect: We have the result on
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+        }
+        // For all contexts except EffectContext, we have the result on
         // top of the stack.
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               context_);
+                               Token::ASSIGN);
       }
       break;
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, eax);
+        context()->Plug(eax);
       }
       break;
     }
@@ -3455,17 +3362,14 @@
       __ pop(ecx);
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         // Result is on the stack
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, eax);
+        context()->Plug(eax);
       }
       break;
     }
@@ -3473,8 +3377,11 @@
 }
 
 
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
+  ASSERT(!context()->IsEffect());
+  ASSERT(!context()->IsTest());
+
   if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
     Comment cmnt(masm_, "Global variable");
     __ mov(eax, CodeGenerator::GlobalObject());
@@ -3482,16 +3389,16 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ call(ic, RelocInfo::CODE_TARGET);
-    if (where == kStack) __ push(eax);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    context()->Plug(eax);
   } else if (proxy != NULL &&
-             proxy->var()->slot() != NULL &&
-             proxy->var()->slot()->type() == Slot::LOOKUP) {
+             proxy->var()->AsSlot() != NULL &&
+             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
@@ -3500,10 +3407,10 @@
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     __ bind(&done);
 
-    if (where == kStack) __ push(eax);
+    context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitForValue(expr, where);
+    Visit(expr);
   }
 }
 
@@ -3525,7 +3432,10 @@
   if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
   Handle<String> check = Handle<String>::cast(right_literal_value);
 
-  VisitForTypeofValue(left_unary->expression(), kAccumulator);
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(left_unary->expression());
+  }
+
   if (check->Equals(Heap::number_symbol())) {
     __ test(eax, Immediate(kSmiTagMask));
     __ j(zero, if_true);
@@ -3602,8 +3512,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3611,21 +3521,21 @@
   Expression* left = expr->left();
   Expression* right = expr->right();
   if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
-    Apply(context_, if_true, if_false);
+    context()->Plug(if_true, if_false);
     return;
   }
 
-  VisitForValue(expr->left(), kStack);
+  VisitForStackValue(expr->left());
   switch (expr->op()) {
     case Token::IN:
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
       __ cmp(eax, Factory::true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
 
     case Token::INSTANCEOF: {
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       InstanceofStub stub;
       __ CallStub(&stub);
       __ test(eax, Operand(eax));
@@ -3635,7 +3545,7 @@
     }
 
     default: {
-      VisitForValue(expr->right(), kAccumulator);
+      VisitForAccumulatorValue(expr->right());
       Condition cc = no_condition;
       bool strict = false;
       switch (op) {
@@ -3672,8 +3582,9 @@
           UNREACHABLE();
       }
 
-      if (ShouldInlineSmiCase(op)) {
-        Label slow_case;
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      if (inline_smi_code) {
+        NearLabel slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
         __ test(ecx, Immediate(kSmiTagMask));
@@ -3683,7 +3594,10 @@
         __ bind(&slow_case);
       }
 
-      CompareStub stub(cc, strict);
+      CompareFlags flags = inline_smi_code
+          ? NO_SMI_COMPARE_IN_STUB
+          : NO_COMPARE_FLAGS;
+      CompareStub stub(cc, strict, flags);
       __ CallStub(&stub);
       __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
@@ -3692,7 +3606,7 @@
 
   // Convert the result of the comparison into one expected for this
   // expression's context.
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3701,10 +3615,10 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
-  VisitForValue(expr->expression(), kAccumulator);
+  VisitForAccumulatorValue(expr->expression());
   __ cmp(eax, Factory::null_value());
   if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
@@ -3720,20 +3634,46 @@
     __ test(edx, Immediate(1 << Map::kIsUndetectable));
     Split(not_zero, if_true, if_false, fall_through);
   }
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  Apply(context_, eax);
+  context()->Plug(eax);
 }
 
 
-Register FullCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::result_register() {
+  return eax;
+}
 
 
-Register FullCodeGenerator::context_register() { return esi; }
+Register FullCodeGenerator::context_register() {
+  return esi;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ call(ic, mode);
+
+  // If we're calling a (keyed) load or store stub, we have to mark
+  // the call as containing no inlined code so we will not attempt to
+  // patch it.
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+    case Code::KEYED_LOAD_IC:
+    case Code::STORE_IC:
+    case Code::KEYED_STORE_IC:
+      __ nop();  // Signals no inlined code.
+      break;
+    default:
+      // Do nothing.
+      break;
+  }
+}
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
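The nop emitted above is a marker protocol shared with the IC patching code: the patch routines inspect the byte immediately following a call site, and an unexpected byte means there is no inlined fast path to patch. A minimal sketch of the consumer side, assuming a marker-byte convention like the kMovEcxByte check in PatchInlinedContextualLoad below (MarkerByteMatches is a hypothetical helper, not part of this patch):

    // Sketch: how a patch routine can decide whether to patch a call
    // site. The nop (0x90) emitted after load/store IC calls can never
    // equal a marker byte such as kMovEcxByte (0xB9), so patching is
    // safely skipped for those sites.
    static bool MarkerByteMatches(Address call_site, byte expected_marker) {
      Address marker_address = call_site + Assembler::kCallTargetAddressOffset;
      return *marker_address == expected_marker;
    }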
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3d0bd79..413c36e 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -692,7 +692,6 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = edx;
   Register index = eax;
@@ -707,7 +706,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -715,10 +714,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ Set(eax, Immediate(Factory::undefined_value()));
-  __ ret(0);
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -1666,6 +1661,38 @@
 }
 
 
+// One-byte opcode for mov ecx,0xXXXXXXXX.
+static const byte kMovEcxByte = 0xB9;
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // The address of the instruction following the call.
+  Address mov_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // If the instruction following the call is not a mov ecx, nothing
+  // was inlined.
+  if (*mov_instruction_address != kMovEcxByte) return false;
+
+  Address delta_address = mov_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 4 bytes of the 7-byte
+  // operand-immediate compare instruction, so we add 3 to get the
+  // offset to the last 4 bytes.
+  Address map_address = mov_instruction_address + delta + 3;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The cell is in the last 4 bytes of a five-byte mov reg, imm32
+  // instruction, so we add 1 to get the offset to the last 4 bytes.
+  Address offset_address =
+      mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
+  *reinterpret_cast<Object**>(offset_address) = cell;
+  return true;
+}
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
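The "+ 3" and "+ 1" offsets in PatchInlinedContextualLoad fall straight out of the x86 encodings involved. A sketch of the assumed byte layout (illustrative; the exact operands depend on what the full code generator emitted):

    // call <ic>                ; the patched call site
    // mov ecx, <delta>         ; B9 + imm32 (5 bytes); delta points
    //                          ; back to the inlined map check
    // ...
    // cmp <operand>, <map>     ; 7 bytes: opcode + ModRM + SIB/disp
    //                          ; + imm32, so the map immediate is
    //                          ; bytes 3..6 (hence "+ 3")
    // mov <reg>, <cell>        ; 5 bytes: opcode + imm32, so the cell
    //                          ; immediate is bytes 1..4 (hence "+ 1")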
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 87e25d7..a62f74b 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1361,6 +1361,13 @@
 }
 
 
+void MacroAssembler::Move(Register dst, Register src) {
+  if (!dst.is(src)) {
+    mov(dst, src);
+  }
+}
+
+
 void MacroAssembler::Move(Register dst, Handle<Object> value) {
   mov(dst, value);
 }
@@ -1553,6 +1560,17 @@
 }
 
 
+void MacroAssembler::LoadPowerOf2(XMMRegister dst,
+                                  Register scratch,
+                                  int power) {
+  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+                  HeapNumber::kExponentBits));
+  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
+  movd(dst, Operand(scratch));
+  psllq(dst, HeapNumber::kMantissaBits);
+}
+
+
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
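LoadPowerOf2 builds the double 2^power directly from its IEEE-754 bit pattern: it writes the biased exponent into the low lane of dst and shifts it into the exponent field, leaving the sign and mantissa bits zero. A scalar sketch of the same computation, assuming the standard double layout (11 exponent bits, bias 1023, 52 mantissa bits):

    #include <cstdint>
    #include <cstring>

    // Valid for powers where power + 1023 fits in 11 bits, mirroring
    // the is_uintn ASSERT in LoadPowerOf2 above.
    double PowerOf2(int power) {
      uint64_t bits = static_cast<uint64_t>(power + 1023) << 52;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;  // e.g. PowerOf2(3) == 8.0
    }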
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a7534cb..fe3267c 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,8 @@
                       TypeInfo info,
                       Label* on_not_int32);
 
+  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
+
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object);
 
@@ -503,6 +505,9 @@
 
   void Call(Label* target) { call(target); }
 
+  // Move if the registers are not identical.
+  void Move(Register target, Register source);
+
   void Move(Register target, Handle<Object> value);
 
   Handle<Object> CodeObject() { return code_object_; }
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 828e71a..dd0d636 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -265,7 +265,11 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+         Top::global());
+  __ j(not_equal, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -1626,7 +1630,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            eax);
+                                            eax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
@@ -1695,7 +1700,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            eax);
+                                            eax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
@@ -1813,6 +1819,131 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+  CpuFeatures::Scope use_sse2(SSE2);
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+
+    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into eax.
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Check if the argument is a smi.
+  Label smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &smi);
+
+  // Check if the argument is a heap number and load its value into xmm0.
+  Label slow;
+  __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+  __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+
+  // Check if the argument is strictly positive. Note this also
+  // discards NaN.
+  __ xorpd(xmm1, xmm1);
+  __ ucomisd(xmm0, xmm1);
+  __ j(below_equal, &slow);
+
+  // Do a truncating conversion.
+  __ cvttsd2si(eax, Operand(xmm0));
+
+  // Check if the result fits into a smi. Note this also checks for
+  // 0x80000000 which signals a failed conversion.
+  Label wont_fit_into_smi;
+  __ test(eax, Immediate(0xc0000000));
+  __ j(not_zero, &wont_fit_into_smi);
+
+  // Smi tag and return.
+  __ SmiTag(eax);
+  __ bind(&smi);
+  __ ret(2 * kPointerSize);
+
+  // Check if the argument is < 2^kMantissaBits.
+  Label already_round;
+  __ bind(&wont_fit_into_smi);
+  __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
+  __ ucomisd(xmm0, xmm1);
+  __ j(above_equal, &already_round);
+
+  // Save a copy of the argument.
+  __ movaps(xmm2, xmm0);
+
+  // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+  __ addsd(xmm0, xmm1);
+  __ subsd(xmm0, xmm1);
+
+  // Compare the argument and the tentative result to get the right mask:
+  //   if xmm2 < xmm0:
+  //     xmm2 = 1...1
+  //   else:
+  //     xmm2 = 0...0
+  __ cmpltsd(xmm2, xmm0);
+
+  // Subtract 1 if the argument was less than the tentative result.
+  __ LoadPowerOf2(xmm1, ebx, 0);
+  __ andpd(xmm1, xmm2);
+  __ subsd(xmm0, xmm1);
+
+  // Return a new heap number.
+  __ AllocateHeapNumber(eax, ebx, edx, &slow);
+  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  __ ret(2 * kPointerSize);
+
+  // Return the argument (when it's an already round heap number).
+  __ bind(&already_round);
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ ret(2 * kPointerSize);
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // ecx: function name.
+  Object* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
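The rounding core of CompileMathFloorCall is the classic add-and-subtract-2^52 trick: for 0 < x < 2^52, adding 2^52 forces the fractional bits out of the mantissa under round-to-nearest, and subtracting it back yields the nearest integer; the cmpltsd/andpd/subsd sequence then subtracts 1 exactly when that rounding went up instead of down. A scalar model under the same assumptions (round-to-nearest mode, argument strictly positive and below 2^52; illustrative only):

    double FloorViaMantissaTrick(double x) {
      const double kTwo52 = 9007199254740992.0;  // 2^52
      double rounded = (x + kTwo52) - kTwo52;    // nearest integer to x
      if (x < rounded) rounded -= 1.0;           // the cmpltsd/andpd/subsd step
      return rounded;  // FloorViaMantissaTrick(2.7) == 2.0
    }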
@@ -1894,7 +2025,7 @@
         __ j(above_equal, &miss, not_taken);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, eax);
+            masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -1914,7 +2045,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, eax);
+            masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -1935,7 +2066,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -2324,7 +2455,10 @@
                                              name,
                                              edx,
                                              &miss);
-    if (cell->IsFailure()) return cell;
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2374,7 +2508,10 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2474,12 +2611,12 @@
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ mov(eax, ebx);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
@@ -2535,9 +2672,13 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
+
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 5f1e1e4..a31f6e8 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1313,7 +1313,7 @@
 
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL) {
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     if (slot->type() == Slot::LOCAL) {
       PushLocalAt(slot->index());
       return;
diff --git a/src/ic.cc b/src/ic.cc
index b4a333e..5b62a8a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -299,6 +299,7 @@
   // present) to guarantee failure by holding an invalid map (the null
   // value).  The offset can be patched to anything.
   PatchInlinedLoad(address, Heap::null_value(), 0);
+  PatchInlinedContextualLoad(address, Heap::null_value(), Heap::null_value());
 }
 
 
@@ -720,6 +721,14 @@
 }
 
 
+#ifdef DEBUG
+#define TRACE_IC_NAMED(msg, name)                           \
+  do {                                                      \
+    if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString());   \
+  } while (false)
+#else
+#define TRACE_IC_NAMED(msg, name) do { } while (false)
+#endif
+
+
 Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
@@ -797,15 +806,24 @@
     LOG(SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined =
+  bool can_be_inlined_precheck =
       FLAG_use_ic &&
-      state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
-      lookup.type() == FIELD &&
       !object->IsAccessCheckNeeded();
 
+  bool can_be_inlined =
+      can_be_inlined_precheck &&
+      state == PREMONOMORPHIC &&
+      lookup.type() == FIELD;
+
+  bool can_be_inlined_contextual =
+      can_be_inlined_precheck &&
+      state == UNINITIALIZED &&
+      lookup.holder()->IsGlobalObject() &&
+      lookup.type() == NORMAL;
+
   if (can_be_inlined) {
     Map* map = lookup.holder()->map();
     // Property's index in the properties array.  If negative we have
@@ -816,32 +834,29 @@
       int offset = map->instance_size() + (index * kPointerSize);
       if (PatchInlinedLoad(address(), map, offset)) {
         set_target(megamorphic_stub());
-#ifdef DEBUG
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
-        }
-#endif
+        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
         return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-#ifdef DEBUG
       } else {
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
-                 *name->ToCString());
-        }
+        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
+                       name);
       }
     } else {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
-               *name->ToCString());
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
+    }
+  } else if (can_be_inlined_contextual) {
+    Map* map = lookup.holder()->map();
+    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+        lookup.holder()->property_dictionary()->ValueAt(
+            lookup.GetDictionaryEntry()));
+    if (PatchInlinedContextualLoad(address(), map, cell)) {
+      set_target(megamorphic_stub());
+      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
+      ASSERT(cell->value() != Heap::the_hole_value());
+      return cell->value();
     }
   } else {
     if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
-               *name->ToCString());
-#endif
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
     }
   }
 
diff --git a/src/ic.h b/src/ic.h
index 17450cc..a5fada0 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -298,6 +298,10 @@
 
   static bool PatchInlinedLoad(Address address, Object* map, int index);
 
+  static bool PatchInlinedContextualLoad(Address address,
+                                         Object* map,
+                                         Object* cell);
+
   friend class IC;
 };
 
diff --git a/src/list.h b/src/list.h
index 9abf61c..5a08212 100644
--- a/src/list.h
+++ b/src/list.h
@@ -67,12 +67,12 @@
   // Returns a reference to the element at index i.  This reference is
   // not safe to use after operations that can change the list's
   // backing store (e.g., Add).
-  inline T& operator[](int i) const  {
+  inline T& operator[](int i) const {
     ASSERT(0 <= i);
     ASSERT(i < length_);
     return data_[i];
   }
-  inline T& at(int i) const  { return operator[](i); }
+  inline T& at(int i) const { return operator[](i); }
   inline T& last() const { return at(length_ - 1); }
   inline T& first() const { return at(0); }
 
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 41523a8..c07e83f 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -664,7 +664,7 @@
       int j = 0;
       for (int i = 0; i < list.length(); i++) {
         Variable* var1 = list[i];
-        Slot* slot = var1->slot();
+        Slot* slot = var1->AsSlot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           if (j != i) {
             list[j] = var1;
@@ -677,7 +677,7 @@
       for (int k = 1; k < j; k++) {
         int l = k;
         for (int m = k + 1; m < j; m++) {
-          if (list[l]->slot()->index() > list[m]->slot()->index()) {
+          if (list[l]->AsSlot()->index() > list[m]->AsSlot()->index()) {
             l = m;
           }
         }
@@ -687,7 +687,7 @@
         SetElement(scope_info_list, scope_info_length, list[i]->name());
         scope_info_length++;
         SetElement(scope_info_list, scope_info_length,
-                   Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
+                   Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
         scope_info_length++;
       }
       SetElement(scope_info_list, scope_info_length,
diff --git a/src/log.cc b/src/log.cc
index 0bca5eb..5c70057 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -171,7 +171,9 @@
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
+    sample->stack[i++] =
+        reinterpret_cast<Address>(it.frame()->function_slot_object()) -
+            kHeapObjectTag;
     it.Advance();
   }
   sample->frames_count = i;
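The subtraction of kHeapObjectTag matters here because V8 stores heap pointers tagged in their low bits; function_slot_object() yields a tagged word, and the profiler wants the raw object start address without dereferencing anything during a GC-unsafe sample. A sketch of the tagging scheme as commonly configured (assuming kHeapObjectTag == 1 and kSmiTag == 0; illustrative):

    // Tagged word -> raw address, without touching the object itself.
    static Address UntagHeapObject(intptr_t tagged_word) {
      // Heap object pointers have their low bit set; smis have it clear.
      ASSERT((tagged_word & kHeapObjectTagMask) == kHeapObjectTag);
      return reinterpret_cast<Address>(tagged_word - kHeapObjectTag);
    }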
@@ -869,14 +871,17 @@
 
 void Logger::FunctionCreateEvent(JSFunction* function) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  // This function can be called from GC iterators (during Scavenge,
+  // mark-compact, and mark-sweep), so marking bits can be set on
+  // objects. That is why unchecked accessors are used here.
   static Address prev_code = NULL;
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
   msg.AppendAddress(function->address());
   msg.Append(',');
-  msg.AppendAddress(function->code()->address(), prev_code);
-  prev_code = function->code()->address();
+  msg.AppendAddress(function->unchecked_code()->address(), prev_code);
+  prev_code = function->unchecked_code()->address();
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -887,6 +892,16 @@
 }
 
 
+void Logger::FunctionCreateEventFromMove(JSFunction* function,
+                                         HeapObject*) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
+    FunctionCreateEvent(function);
+  }
+#endif
+}
+
+
 void Logger::FunctionMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
diff --git a/src/log.h b/src/log.h
index 160072d..2534e1e 100644
--- a/src/log.h
+++ b/src/log.h
@@ -216,6 +216,8 @@
   static void CodeDeleteEvent(Address from);
   // Emits a function object create event.
   static void FunctionCreateEvent(JSFunction* function);
+  static void FunctionCreateEventFromMove(JSFunction* function,
+                                          HeapObject*);
   // Emits a function move event.
   static void FunctionMoveEvent(Address from, Address to);
   // Emits a function delete event.
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a9e852e..c847b84 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -282,10 +282,7 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
-    table_.Register(kVisitSharedFunctionInfo,
-                    &FixedBodyVisitor<StaticMarkingVisitor,
-                                      SharedFunctionInfo::BodyDescriptor,
-                                      void>::Visit);
+    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
 
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
@@ -537,6 +534,17 @@
   }
 
 
+  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+    if (shared->IsInobjectSlackTrackingInProgress()) {
+      shared->DetachInitialMap();
+    }
+    FixedBodyVisitor<StaticMarkingVisitor,
+                     SharedFunctionInfo::BodyDescriptor,
+                     void>::Visit(map, object);
+  }
+
+
   static void VisitCodeEntry(Address entry_address) {
     Object* code = Code::GetObjectFromEntryAddress(entry_address);
     Object* old_code = code;
@@ -1139,6 +1147,14 @@
     // Only JSObject and subtypes have map transitions and back pointers.
     if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
     if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+
+    if (map->IsMarked() && map->attached_to_shared_function_info()) {
+      // This map is used for inobject slack tracking and has been detached
+      // from SharedFunctionInfo during the mark phase.
+      // Since it survived the GC, reattach it now.
+      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
+    }
+
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
     while (SafeIsMap(current)) {
@@ -2504,6 +2520,7 @@
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
 
@@ -2596,6 +2613,7 @@
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
 
diff --git a/src/messages.js b/src/messages.js
index f26c3b5..4f492bc 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -684,6 +684,11 @@
   return FormatEvalOrigin(script);
 };
 
+CallSite.prototype.getScriptNameOrSourceURL = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script ? script.nameOrSourceURL() : null;
+};
+
 CallSite.prototype.getFunction = function () {
   return this.fun;
 };
@@ -775,7 +780,11 @@
 };
 
 function FormatEvalOrigin(script) {
-  var eval_origin = "";
+  var sourceURL = script.nameOrSourceURL();
+  if (sourceURL)
+    return sourceURL;
+
+  var eval_origin = "eval at ";
   if (script.eval_from_function_name) {
     eval_origin += script.eval_from_function_name;
   } else {
@@ -786,9 +795,9 @@
   if (eval_from_script) {
     if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
       // eval script originated from another eval.
-      eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+      eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
     } else {
-      // eval script originated from "real" scource.
+      // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
         var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
@@ -807,25 +816,30 @@
 };
 
 function FormatSourcePosition(frame) {
+  var fileName;
   var fileLocation = "";
   if (frame.isNative()) {
     fileLocation = "native";
   } else if (frame.isEval()) {
-    fileLocation = "eval at " + frame.getEvalOrigin();
+    fileName = frame.getScriptNameOrSourceURL();
+    if (!fileName)
+      fileLocation = frame.getEvalOrigin();
   } else {
-    var fileName = frame.getFileName();
-    if (fileName) {
-      fileLocation += fileName;
-      var lineNumber = frame.getLineNumber();
-      if (lineNumber != null) {
-        fileLocation += ":" + lineNumber;
-        var columnNumber = frame.getColumnNumber();
-        if (columnNumber) {
-          fileLocation += ":" + columnNumber;
-        }
+    fileName = frame.getFileName();
+  }
+
+  if (fileName) {
+    fileLocation += fileName;
+    var lineNumber = frame.getLineNumber();
+    if (lineNumber != null) {
+      fileLocation += ":" + lineNumber;
+      var columnNumber = frame.getColumnNumber();
+      if (columnNumber) {
+        fileLocation += ":" + columnNumber;
       }
     }
   }
+
   if (!fileLocation) {
     fileLocation = "unknown source";
   }
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index b7c3ebc..a687c2b 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -73,13 +73,13 @@
 
 // Core register.
 struct Register {
-  bool is_valid() const  { return 0 <= code_ && code_ < kNumRegisters; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const { return code_ == reg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
@@ -129,13 +129,13 @@
 
 // Coprocessor register.
 struct FPURegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < kNumFPURegister ; }
-  bool is(FPURegister creg) const  { return code_ == creg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister; }
+  bool is(FPURegister creg) const { return code_ == creg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     ASSERT(is_valid());
     return 1 << code_;
   }
@@ -665,4 +665,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_ASSEMBLER_MIPS_H_
-
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 75e7a29..66f891b 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -240,10 +240,10 @@
   void ProcessDeferred();
 
   // State
-  bool has_cc() const  { return cc_reg_ != cc_always; }
+  bool has_cc() const { return cc_reg_ != cc_always; }
   TypeofState typeof_state() const { return state_->typeof_state(); }
-  JumpTarget* true_target() const  { return state_->true_target(); }
-  JumpTarget* false_target() const  { return state_->false_target(); }
+  JumpTarget* true_target() const { return state_->true_target(); }
+  JumpTarget* false_target() const { return state_->false_target(); }
 
   // We don't track loop nesting level on mips yet.
   int loop_nesting() const { return 0; }
@@ -280,7 +280,7 @@
   MemOperand SlotOperand(Slot* slot, Register tmp);
 
   // Expressions
-  MemOperand GlobalObject() const  {
+  MemOperand GlobalObject() const {
     return ContextOperand(cp, Context::GLOBAL_INDEX);
   }
 
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index 0fce3cd..d630562 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -52,9 +52,7 @@
 }
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
+Address ExitFrame::ComputeStackPointer(Address fp) {
   Address sp = fp + ExitFrameConstants::kSPDisplacement;
   const int offset = ExitFrameConstants::kCodeOffset;
   Object* code = Memory::Object_at(fp + offset);
@@ -62,11 +60,7 @@
   if (is_debug_exit) {
     sp -= kNumJSCallerSaved * kPointerSize;
   }
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  return EXIT;
+  return sp;
 }
 
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6d49d75..ed08468 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -649,8 +649,9 @@
 }
 
 
-void Map::NormalizedMapVerify() {
+void Map::SharedMapVerify() {
   MapVerify();
+  ASSERT(is_shared());
   ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
   ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
   ASSERT_EQ(0, pre_allocated_property_fields());
@@ -904,7 +905,7 @@
 
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
-                  static_cast<intptr_t>(kCodeAlignment)));
+                  kCodeAlignment));
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();
@@ -1381,7 +1382,7 @@
     for (int i = 0; i < length(); i++) {
       Object* e = get(i);
       if (e->IsMap()) {
-        Map::cast(e)->NormalizedMapVerify();
+        Map::cast(e)->SharedMapVerify();
       } else {
         ASSERT(e->IsUndefined());
       }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index bac224f..f63d672 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -83,7 +83,6 @@
   }
 
 
-
 #define SMI_ACCESSORS(holder, name, offset)             \
   int holder::name() {                                  \
     Object* value = READ_FIELD(this, offset);           \
@@ -1343,8 +1342,8 @@
 
 
 
-void JSObject::InitializeBody(int object_size) {
-  Object* value = Heap::undefined_value();
+void JSObject::InitializeBody(int object_size, Object* value) {
+  ASSERT(!value->IsHeapObject() || !Heap::InNewSpace(value));
   for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
     WRITE_FIELD(this, offset, value);
   }
@@ -2279,6 +2278,36 @@
 }
 
 
+void Map::set_attached_to_shared_function_info(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
+  }
+}
+
+bool Map::attached_to_shared_function_info() {
+  return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
+}
+
+
+void Map::set_is_shared(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kIsShared));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kIsShared));
+  }
+}
+
+bool Map::is_shared() {
+  return ((1 << kIsShared) & bit_field2()) != 0;
+}
+
+
+JSFunction* Map::unchecked_constructor() {
+  return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
+}
+
 
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
@@ -2571,6 +2600,7 @@
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -2662,6 +2692,37 @@
               kThisPropertyAssignmentsCountOffset)
 #endif
 
+
+int SharedFunctionInfo::construction_count() {
+  return READ_BYTE_FIELD(this, kConstructionCountOffset);
+}
+
+
+void SharedFunctionInfo::set_construction_count(int value) {
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
+}
+
+
+bool SharedFunctionInfo::live_objects_may_exist() {
+  return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
+}
+
+
+void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
+  if (value) {
+    set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
+  } else {
+    set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
+  }
+}
+
+
+bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
+  return initial_map() != Heap::undefined_value();
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
@@ -3138,9 +3199,9 @@
   ASSERT(HasFastElements());
   FixedArray* elems = FixedArray::cast(elements());
   if (elems->map() != Heap::fixed_cow_array_map()) return elems;
-  Object* writable_elems = Heap::CopyFixedArray(elems);
+  Object* writable_elems = Heap::CopyFixedArrayWithMap(elems,
+                                                       Heap::fixed_array_map());
   if (writable_elems->IsFailure()) return writable_elems;
-  FixedArray::cast(writable_elems)->set_map(Heap::fixed_array_map());
   set_elements(FixedArray::cast(writable_elems));
   Counters::cow_arrays_converted.Increment();
   return writable_elems;
diff --git a/src/objects.cc b/src/objects.cc
index ef51851..737bf57 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1476,8 +1476,8 @@
   FixedArray* new_properties = 0;  // Will always be NULL or a valid pointer.
   int new_unused_property_fields = map()->unused_property_fields() - 1;
   if (map()->unused_property_fields() == 0) {
-     new_unused_property_fields = kFieldsAdded - 1;
-     Object* new_properties_unchecked =
+    new_unused_property_fields = kFieldsAdded - 1;
+    Object* new_properties_unchecked =
         properties()->CopySize(properties()->length() + kFieldsAdded);
     if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
     new_properties = FixedArray::cast(new_properties_unchecked);
@@ -2099,61 +2099,34 @@
 }
 
 
-bool NormalizedMapCache::IsCacheable(JSObject* object) {
-  // Caching for global objects is not worth it (there are too few of them).
-  return !object->IsGlobalObject();
-}
-
-
 Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
-  Object* result;
-
   Map* fast = obj->map();
-  if (!IsCacheable(obj)) {
-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-  } else {
-    int index = Hash(fast) % kEntries;
-    result = get(index);
-
-    if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+  int index = Hash(fast) % kEntries;
+  Object* result = get(index);
+  if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
 #ifdef DEBUG
-      if (FLAG_enable_slow_asserts) {
-        // Make sure that the new slow map has exactly the same hash as the
-        // original fast map. This way we can use hash to check if a slow map
-        // is already in the hash (see Contains method).
-        ASSERT(Hash(fast) == Hash(Map::cast(result)));
-        // The cached map should match newly created normalized map bit-by-bit.
-        Object* fresh = fast->CopyNormalized(mode);
-        if (!fresh->IsFailure()) {
-          ASSERT(memcmp(Map::cast(fresh)->address(),
-                        Map::cast(result)->address(),
-                        Map::kSize) == 0);
-        }
+    if (FLAG_enable_slow_asserts) {
+      // The cached map should match newly created normalized map bit-by-bit.
+      Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+      if (!fresh->IsFailure()) {
+        ASSERT(memcmp(Map::cast(fresh)->address(),
+                      Map::cast(result)->address(),
+                      Map::kSize) == 0);
       }
-#endif
-      return result;
     }
-
-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-    set(index, result);
+#endif
+    return result;
   }
+
+  result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+  if (result->IsFailure()) return result;
+  set(index, result);
   Counters::normalized_maps.Increment();
 
   return result;
 }
 
 
-bool NormalizedMapCache::Contains(Map* map) {
-  // If the map is present in the cache it can only be at one place:
-  // at the index calculated from the hash. We assume that a slow map has the
-  // same hash as a fast map it has been generated from.
-  int index = Hash(map) % kEntries;
-  return get(index) == map;
-}
-
-
 void NormalizedMapCache::Clear() {
   int entries = length();
   for (int i = 0; i != entries; i++) {
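NormalizedMapCache::Get is now an unconditional direct-mapped cache: every fast map hashes to exactly one of the kEntries slots, a hit is validated by CheckHit, and a miss normalizes a fresh shared copy and overwrites the slot. The shape of that pattern, reduced to a generic sketch (hypothetical types; the real cache stores Maps in a FixedArray):

    #include <cstddef>
    #include <functional>

    template <typename K, typename V, std::size_t kEntries>
    class DirectMappedCache {
     public:
      // make() builds a fresh value on a miss, mirroring CopyNormalized.
      V& Get(const K& key, V (*make)(const K&)) {
        std::size_t index = std::hash<K>()(key) % kEntries;
        Entry& entry = entries_[index];
        if (!entry.valid || !(entry.key == key)) {  // miss: evict and refill
          entry.key = key;
          entry.value = make(key);
          entry.valid = true;
        }
        return entry.value;
      }

     private:
      struct Entry { K key; V value; bool valid = false; };
      Entry entries_[kEntries];  // default-constructed: all slots invalid
    };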
@@ -2184,7 +2157,7 @@
                                   Map* fast,
                                   PropertyNormalizationMode mode) {
 #ifdef DEBUG
-  slow->NormalizedMapVerify();
+  slow->SharedMapVerify();
 #endif
   return
     slow->constructor() == fast->constructor() &&
@@ -2194,17 +2167,17 @@
                                     fast->inobject_properties()) &&
     slow->instance_type() == fast->instance_type() &&
     slow->bit_field() == fast->bit_field() &&
-    slow->bit_field2() == fast->bit_field2();
+    (slow->bit_field2() & ~(1 << Map::kIsShared)) == fast->bit_field2();
 }
 
 
 Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
-  if (!HasFastProperties() &&
-      NormalizedMapCache::IsCacheable(this) &&
-      Top::context()->global_context()->normalized_map_cache()->
-          Contains(map())) {
-    // Replace the map with the identical copy that can be safely modified.
-    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+  if (map()->is_shared()) {
+    // Fast case maps are never marked as shared.
+    ASSERT(!HasFastProperties());
+    // Replace the map with an identical copy that can be safely modified.
+    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+                                        UNIQUE_NORMALIZED_MAP);
     if (obj->IsFailure()) return obj;
     Counters::normalized_maps.Increment();
 
@@ -3189,12 +3162,14 @@
   }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_is_shared(false);
   Map::cast(result)->ClearCodeCache();
   return result;
 }
 
 
-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+                            NormalizedMapSharingMode sharing) {
   int new_instance_size = instance_size();
   if (mode == CLEAR_INOBJECT_PROPERTIES) {
     new_instance_size -= inobject_properties() * kPointerSize;
@@ -3213,8 +3188,12 @@
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
 
+  Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
 #ifdef DEBUG
-  Map::cast(result)->NormalizedMapVerify();
+  if (Map::cast(result)->is_shared()) {
+    Map::cast(result)->SharedMapVerify();
+  }
 #endif
 
   return result;
@@ -3271,6 +3250,47 @@
 }
 
 
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+  Map* current = this;
+  while (current != Heap::meta_map()) {
+    DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+        *RawField(current, Map::kInstanceDescriptorsOffset));
+    if (d == Heap::empty_descriptor_array()) {
+      Map* prev = current->map();
+      current->set_map(Heap::meta_map());
+      callback(current, data);
+      current = prev;
+      continue;
+    }
+
+    FixedArray* contents = reinterpret_cast<FixedArray*>(
+        d->get(DescriptorArray::kContentArrayIndex));
+    Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+    Object* map_or_index = *map_or_index_field;
+    bool map_done = true;
+    for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+         i < contents->length();
+         i += 2) {
+      PropertyDetails details(Smi::cast(contents->get(i + 1)));
+      if (details.IsTransition()) {
+        Map* next = reinterpret_cast<Map*>(contents->get(i));
+        next->set_map(current);
+        *map_or_index_field = Smi::FromInt(i + 2);
+        current = next;
+        map_done = false;
+        break;
+      }
+    }
+    if (!map_done) continue;
+    *map_or_index_field = Heap::fixed_array_map();
+    Map* prev = current->map();
+    current->set_map(Heap::meta_map());
+    callback(current, data);
+    current = prev;
+  }
+}
+
+
 Object* CodeCache::Update(String* name, Code* code) {
   ASSERT(code->ic_state() == MONOMORPHIC);
 
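TraverseTransitionTree walks the whole transition tree without any auxiliary stack: it threads the parent link through each visited map's map field and parks the loop index in the content array's map slot, restoring both on the way back out. This is the Deutsch-Schorr-Waite pointer-reversal idea, used here because the traversal must be able to run when no allocation is possible. A miniature model of the reversal (hypothetical Node type; real maps link back to meta_map rather than a sentinel argument):

    // Post-order traversal with no stack: each node's link field is
    // temporarily repointed at its parent, then restored to `sentinel`.
    // Assumes every node's link starts out equal to `sentinel`, the way
    // every Map's map() is the meta map.
    struct Node {
      Node* link;   // plays the role of the map() back pointer
      Node* child;  // at most one unvisited child, for simplicity
    };

    void Traverse(Node* root, Node* sentinel, void (*visit)(Node*)) {
      Node* current = root;
      while (current != sentinel) {
        if (current->child != NULL) {
          Node* next = current->child;
          current->child = NULL;     // consume the edge (like the Smi index)
          next->link = current;      // reverse: child remembers its parent
          current = next;
        } else {
          Node* parent = current->link;
          current->link = sentinel;  // restore the terminal link
          visit(current);            // children are already done
          current = parent;          // back out along the reversed link
        }
      }
    }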
@@ -3825,7 +3845,7 @@
 }
 
 
-void DescriptorArray::Sort() {
+void DescriptorArray::SortUnchecked() {
   // In-place heap sort.
   int len = number_of_descriptors();
 
@@ -3875,7 +3895,11 @@
       parent_index = child_index;
     }
   }
+}
 
+
+void DescriptorArray::Sort() {
+  SortUnchecked();
   SLOW_ASSERT(IsSortedNoDuplicates());
 }
 
@@ -5269,6 +5293,13 @@
 }
 
 
+void SharedFunctionInfo::ForbidInlineConstructor() {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     false));
+}
+
+
 void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
     bool only_simple_this_property_assignments,
     FixedArray* assignments) {
@@ -5366,6 +5397,107 @@
 }
 
 
+void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
+  ASSERT(!IsInobjectSlackTrackingInProgress());
+
+  // Only initiate the tracking the first time.
+  if (live_objects_may_exist()) return;
+  set_live_objects_may_exist(true);
+
+  // No tracking during the snapshot construction phase.
+  if (Serializer::enabled()) return;
+
+  if (map->unused_property_fields() == 0) return;
+
+  // A nonzero counter is a leftover from a previous attempt that was
+  // interrupted by GC; keep it.
+  if (construction_count() == 0) {
+    set_construction_count(kGenerousAllocationCount);
+  }
+  set_initial_map(map);
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+            construct_stub());
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::DetachInitialMap() {
+  Map* map = reinterpret_cast<Map*>(initial_map());
+
+  // Make the map remember to restore the link if it survives the GC.
+  map->set_bit_field2(
+      map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
+
+  // Undo the state changes made by StartInobjectSlackTracking (except
+  // the construction_count). This way, if the initial map does not
+  // survive the GC, StartInobjectSlackTracking will be called again the
+  // next time the constructor is called. The countdown will continue and
+  // (possibly after several more GCs) CompleteInobjectSlackTracking will
+  // eventually be called.
+  set_initial_map(Heap::raw_unchecked_undefined_value());
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+            *RawField(this, kConstructStubOffset));
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+  // It is safe to clear the flag: it will be set again if the map is live.
+  set_live_objects_may_exist(false);
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::AttachInitialMap(Map* map) {
+  map->set_bit_field2(
+      map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+
+  // Resume inobject slack tracking.
+  set_initial_map(map);
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+            *RawField(this, kConstructStubOffset));
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+  // The map survived the GC, so there may be objects referencing it.
+  set_live_objects_may_exist(true);
+}
+
+
+static void GetMinInobjectSlack(Map* map, void* data) {
+  int slack = map->unused_property_fields();
+  if (*reinterpret_cast<int*>(data) > slack) {
+    *reinterpret_cast<int*>(data) = slack;
+  }
+}
+
+
+static void ShrinkInstanceSize(Map* map, void* data) {
+  int slack = *reinterpret_cast<int*>(data);
+  map->set_inobject_properties(map->inobject_properties() - slack);
+  map->set_unused_property_fields(map->unused_property_fields() - slack);
+  map->set_instance_size(map->instance_size() - slack * kPointerSize);
+
+  // Visitor id might depend on the instance size, recalculate it.
+  map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+}
+
+
+void SharedFunctionInfo::CompleteInobjectSlackTracking() {
+  ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
+  Map* map = Map::cast(initial_map());
+
+  set_initial_map(Heap::undefined_value());
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+            construct_stub());
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+
+  int slack = map->unused_property_fields();
+  map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+  if (slack != 0) {
+    // Resize the initial map and all maps in its transition tree.
+    map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+    // Give the correct expected_nof_properties to initial maps created later.
+    ASSERT(expected_nof_properties() >= slack);
+    set_expected_nof_properties(expected_nof_properties() - slack);
+  }
+}
+
+
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
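The two traversals in CompleteInobjectSlackTracking cooperate: the first computes the minimum unused_property_fields across the entire transition tree (so no map is shrunk below what some surviving object actually uses), and the second shrinks every map by exactly that amount. A worked example with hypothetical numbers:

    // Suppose the maps in the transition tree report unused slack of
    // {3, 5, 4}. GetMinInobjectSlack leaves slack == 3, and
    // ShrinkInstanceSize then applies, for each map:
    int slack = 3;                                  // min over the tree
    int inobject_properties = 10 - slack;           // e.g. 10 -> 7
    int unused_property_fields = 5 - slack;         // e.g. 5 -> 2
    int instance_size = 96 - slack * kPointerSize;  // 96 -> 84 on ia32
    // expected_nof_properties also drops by 3 so that initial maps
    // created later start at the shrunken size.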
@@ -5919,21 +6051,24 @@
 }
 
 
-bool JSObject::HasLocalElement(uint32_t index) {
+JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
       !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
     Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+    return UNDEFINED_ELEMENT;
   }
 
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
-    return HasElementWithInterceptor(this, index);
+    return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
+                                                  : UNDEFINED_ELEMENT;
   }
 
   // Handle [] on String objects.
-  if (this->IsStringObjectWithCharacterAt(index)) return true;
+  if (this->IsStringObjectWithCharacterAt(index)) {
+    return STRING_CHARACTER_ELEMENT;
+  }
 
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
@@ -5941,12 +6076,16 @@
           static_cast<uint32_t>
               (Smi::cast(JSArray::cast(this)->length())->value()) :
           static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      return (index < length) &&
-          !FixedArray::cast(elements())->get(index)->IsTheHole();
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+        return FAST_ELEMENT;
+      }
+      break;
     }
     case PIXEL_ELEMENTS: {
       PixelArray* pixels = PixelArray::cast(elements());
-      return (index < static_cast<uint32_t>(pixels->length()));
+      if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
+      break;
     }
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -5956,18 +6095,22 @@
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_FLOAT_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
-      return (index < static_cast<uint32_t>(array->length()));
+      if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
+      break;
     }
     case DICTIONARY_ELEMENTS: {
-      return element_dictionary()->FindEntry(index)
-          != NumberDictionary::kNotFound;
+      if (element_dictionary()->FindEntry(index) !=
+              NumberDictionary::kNotFound) {
+        return DICTIONARY_ELEMENT;
+      }
+      break;
     }
     default:
       UNREACHABLE();
       break;
   }
-  UNREACHABLE();
-  return Heap::null_value();
+
+  return UNDEFINED_ELEMENT;
 }
 
 
@@ -8710,11 +8853,11 @@
     // No free slot - extend break point info array.
     Handle<FixedArray> old_break_points =
         Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
-    debug_info->set_break_points(*Factory::NewFixedArray(
-        old_break_points->length() +
-            Debug::kEstimatedNofBreakPointsInFunction));
     Handle<FixedArray> new_break_points =
-        Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+        Factory::NewFixedArray(old_break_points->length() +
+                               Debug::kEstimatedNofBreakPointsInFunction);
+
+    debug_info->set_break_points(*new_break_points);
     for (int i = 0; i < old_break_points->length(); i++) {
       new_break_points->set(i, old_break_points->get(i));
     }
diff --git a/src/objects.h b/src/objects.h
index 7f6538c..7f301b5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -200,6 +200,14 @@
 };
 
 
+// NormalizedMapSharingMode is used to specify whether a map may be shared
+// by different objects with normalized properties.
+enum NormalizedMapSharingMode {
+  UNIQUE_NORMALIZED_MAP,
+  SHARED_NORMALIZED_MAP
+};
+
+
 // Instance size sentinel for objects of variable size.
 static const int kVariableSizeSentinel = 0;
 
@@ -1417,7 +1425,26 @@
   // Tells whether the index'th element is present.
   inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
-  bool HasLocalElement(uint32_t index);
+
+  // Tells whether the index'th element is present and how it is stored.
+  enum LocalElementType {
+    // There is no element with the given index.
+    UNDEFINED_ELEMENT,
+
+    // The element with the given index is handled by an interceptor.
+    INTERCEPTED_ELEMENT,
+
+    // The element with the given index is a character in a string.
+    STRING_CHARACTER_ELEMENT,
+
+    // The element with the given index is stored in the fast backing store.
+    FAST_ELEMENT,
+
+    // The element with the given index is stored in the slow backing store.
+    DICTIONARY_ELEMENT
+  };
+
+  LocalElementType HasLocalElement(uint32_t index);
 
   bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
   bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
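Replacing the bool return lets callers learn how an element is stored, not just that it exists. A caller-side sketch (DescribeElement is a hypothetical helper, not part of this patch):

    static const char* DescribeElement(JSObject* object, uint32_t index) {
      switch (object->HasLocalElement(index)) {
        case JSObject::UNDEFINED_ELEMENT:        return "absent";
        case JSObject::INTERCEPTED_ELEMENT:      return "handled by interceptor";
        case JSObject::STRING_CHARACTER_ELEMENT: return "character in string";
        case JSObject::FAST_ELEMENT:             return "fast backing store";
        case JSObject::DICTIONARY_ELEMENT:      return "slow backing store";
      }
      UNREACHABLE();
      return NULL;
    }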
@@ -1576,7 +1603,7 @@
   // initialized by set_properties
   // Note: this call does not update the write barrier; it is the caller's
   // responsibility to ensure that *v* can be collected without WB here.
-  inline void InitializeBody(int object_size);
+  inline void InitializeBody(int object_size, Object* value);
 
   // Check whether this object references another object
   bool ReferencesObject(Object* obj);
@@ -1892,6 +1919,11 @@
   MUST_USE_RESULT Object* RemoveTransitions();
 
   // Sort the instance descriptors by the hash codes of their keys.
+  // Does not check for duplicates.
+  void SortUnchecked();
+
+  // Sort the instance descriptors by the hash codes of their keys.
+  // Checks the result for duplicates.
   void Sort();
 
   // Search the instance descriptors for given name.
@@ -2485,12 +2517,8 @@
  public:
   static const int kEntries = 64;
 
-  static bool IsCacheable(JSObject* object);
-
   Object* Get(JSObject* object, PropertyNormalizationMode mode);
 
-  bool Contains(Map* map);
-
   void Clear();
 
   // Casting
@@ -2985,11 +3013,6 @@
   void CodePrint();
   void CodeVerify();
 #endif
-  // Code entry points are aligned to 32 bytes.
-  static const int kCodeAlignmentBits = 5;
-  static const int kCodeAlignment = 1 << kCodeAlignmentBits;
-  static const int kCodeAlignmentMask = kCodeAlignment - 1;
-
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
@@ -2998,8 +3021,7 @@
   // Add padding to align the instruction start following right after
   // the Code object header.
   static const int kHeaderSize =
-      (kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) &
-          ~kCodeAlignmentMask;
+      CODE_POINTER_ALIGN(kKindSpecificFlagsOffset + kIntSize);
 
   // Byte offsets within kKindSpecificFlagsOffset.
   static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
@@ -3146,6 +3168,19 @@
     return ((1 << kHasFastElements) & bit_field2()) != 0;
   }
 
+  // Tells whether the map is attached to SharedFunctionInfo
+  // (for inobject slack tracking).
+  inline void set_attached_to_shared_function_info(bool value);
+
+  inline bool attached_to_shared_function_info();
+
+  // Tells whether the map is shared between objects that may have different
+  // behavior. If true, the map should never be modified, instead a clone
+  // should be created and modified.
+  inline void set_is_shared(bool value);
+
+  inline bool is_shared();
+
   // Tells whether the instance needs security checks when accessing its
   // properties.
   inline void set_is_access_check_needed(bool access_check_needed);
@@ -3157,6 +3192,8 @@
   // [constructor]: points back to the function responsible for this map.
   DECL_ACCESSORS(constructor, Object)
 
+  inline JSFunction* unchecked_constructor();
+
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)
 
@@ -3165,7 +3202,8 @@
 
   MUST_USE_RESULT Object* CopyDropDescriptors();
 
-  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
+  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
+                                         NormalizedMapSharingMode sharing);
 
   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.
@@ -3229,12 +3267,16 @@
 #ifdef DEBUG
   void MapPrint();
   void MapVerify();
-  void NormalizedMapVerify();
+  void SharedMapVerify();
 #endif
 
   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);
 
+  typedef void (*TraverseCallback)(Map* map, void* data);
+
+  void TraverseTransitionTree(TraverseCallback callback, void* data);
+
   static const int kMaxPreAllocatedPropertyFields = 255;
 
   // Layout description.
@@ -3288,6 +3330,8 @@
   static const int kFunctionWithPrototype = 1;
   static const int kHasFastElements = 2;
   static const int kStringWrapperSafeForDefaultValueOf = 3;
+  static const int kAttachedToSharedFunctionInfo = 4;
+  static const int kIsShared = 5;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -3442,6 +3486,100 @@
   inline int expected_nof_properties();
   inline void set_expected_nof_properties(int value);
 
+  // Inobject slack tracking is the way to reclaim unused inobject space.
+  //
+  // The instance size is initially determined by adding some slack to
+  // expected_nof_properties (to allow for a few extra properties added
+  // after the constructor). There is no guarantee that the extra space
+  // will not be wasted.
+  //
+  // Here is the algorithm to reclaim the unused inobject space:
+  // - Detect the first constructor call for this SharedFunctionInfo.
+  //   When it happens enter the "in progress" state: remember the
+  //   constructor's initial_map and install a special construct stub that
+  //   counts constructor calls.
+  // - While the tracking is in progress create objects filled with
+  //   one_pointer_filler_map instead of undefined_value. This way they can be
+  //   resized quickly and safely.
+  // - Once enough (kGenerousAllocationCount) objects have been created
+  //   compute the 'slack' (traverse the map transition tree starting from the
+  //   initial_map and find the lowest value of unused_property_fields).
+  // - Traverse the transition tree again and decrease the instance size
+  //   of every map. Existing objects will resize automatically (they are
+  //   filled with one_pointer_filler_map). All further allocations will
+  //   use the adjusted instance size.
+  // - Decrease expected_nof_properties so that allocations made from
+  //   another context will use the adjusted instance size too.
+  // - Exit "in progress" state by clearing the reference to the initial_map
+  //   and setting the regular construct stub (generic or inline).
+  //
+  //  The above is the main event sequence. Some special cases are possible
+  //  while the tracking is in progress:
+  //
+  // - GC occurs.
+  //   Check if the initial_map is referenced by any live objects (except this
+  //   SharedFunctionInfo). If it is, continue tracking as usual.
+  //   If it is not, clear the reference and reset the tracking state. The
+  //   tracking will be initiated again on the next constructor call.
+  //
+  // - The constructor is called from another context.
+  //   Immediately complete the tracking, perform all the necessary changes
+  //   to maps. This is necessary because there is no efficient way to track
+  //   multiple initial_maps.
+  //   Proceed to create an object in the current context (with the adjusted
+  //   size).
+  //
+  // - A different constructor function sharing the same SharedFunctionInfo is
+  //   called in the same context. This could be another closure in the same
+  //   context, or the first function could have been disposed.
+  //   This is handled the same way as the previous case.
+  //
+  //  Important: inobject slack tracking is not attempted during the snapshot
+  //  creation.
+
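
The countdown performed by the special construct stub can be pictured with a small sketch (a hypothetical helper, not part of this patch; it uses only the members declared just below):

    // Called conceptually on every constructor invocation while tracking
    // is in progress; kGenerousAllocationCount calls trigger completion.
    void OnConstructorCall(SharedFunctionInfo* shared) {
      int count = shared->construction_count();
      if (count > 0) {
        shared->set_construction_count(count - 1);
        if (count == 1) {
          // Budget exhausted: compute the slack, shrink every map in the
          // transition tree, and restore the regular construct stub.
          shared->CompleteInobjectSlackTracking();
        }
      }
    }
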
+  static const int kGenerousAllocationCount = 16;
+
+  // [construction_count]: Counter for constructor calls made during
+  // the tracking phase.
+  inline int construction_count();
+  inline void set_construction_count(int value);
+
+  // [initial_map]: initial map of the first function called as a constructor.
+  // Saved for the duration of the tracking phase.
+  // This is a weak link (GC resets it to undefined_value if no other live
+  // objects reference this map).
+  DECL_ACCESSORS(initial_map, Object)
+
+  // True if the initial_map is not undefined and the countdown stub is
+  // installed.
+  inline bool IsInobjectSlackTrackingInProgress();
+
+  // Starts the tracking.
+  // Stores the initial map and installs the countdown stub.
+  // IsInobjectSlackTrackingInProgress is normally true after this call,
+  // except when tracking has not been started (e.g. the map has no unused
+  // properties or the snapshot is being built).
+  void StartInobjectSlackTracking(Map* map);
+
+  // Completes the tracking.
+  // IsInobjectSlackTrackingInProgress is false after this call.
+  void CompleteInobjectSlackTracking();
+
+  // Clears the initial_map before the GC marking phase to ensure the reference
+  // is weak. IsInobjectSlackTrackingInProgress is false after this call.
+  void DetachInitialMap();
+
+  // Restores the link to the initial map after the GC marking phase.
+  // IsInobjectSlackTrackingInProgress is true after this call.
+  void AttachInitialMap(Map* map);
+
+  // False if there are definitely no live objects created from this function.
+  // True if live objects _may_ exist (existence not guaranteed).
+  // May go back from true to false after GC.
+  inline bool live_objects_may_exist();
+
+  inline void set_live_objects_may_exist(bool value);
+
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
 
@@ -3542,6 +3680,10 @@
   // prototype.
   bool CanGenerateInlineConstructor(Object* prototype);
 
+  // Prevents further attempts to generate inline constructors.
+  // To be called if generation failed for any reason.
+  void ForbidInlineConstructor();
+
   // For functions which only contains this property assignments this provides
   // access to the names for the properties assigned.
   DECL_ACCESSORS(this_property_assignments, Object)
@@ -3589,8 +3731,10 @@
   static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
   static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
   static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
-  static const int kThisPropertyAssignmentsOffset =
+  static const int kInitialMapOffset =
       kInferredNameOffset + kPointerSize;
+  static const int kThisPropertyAssignmentsOffset =
+      kInitialMapOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
@@ -3614,7 +3758,7 @@
   static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
-  // is to allow interation without maps decoding during
+  // is to allow iteration without maps decoding during
   // garbage collections.
   // To avoid wasting space on 64-bit architectures we use
   // the following trick: we group integer fields into pairs
@@ -3649,6 +3793,18 @@
   static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
 
 #endif
+
+  // The construction counter for inobject slack tracking is stored in the
+  // most significant byte of compiler_hints which is otherwise unused.
+  // Its offset depends on the endian-ness of the architecture.
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+  static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
+#else
+#error Unknown byte ordering
+#endif
+
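
The offset arithmetic above can be sanity-checked with a self-contained probe (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    // Returns the byte offset of the most significant byte of a 32-bit
    // field: 3 on little-endian hosts, 0 on big-endian ones, matching the
    // kConstructionCountOffset arithmetic above.
    int MostSignificantByteOffset() {
      uint32_t probe = 0xFF000000u;  // only the most significant byte set
      unsigned char bytes[sizeof(probe)];
      memcpy(bytes, &probe, sizeof(probe));
      return bytes[0] == 0xFF ? 0 : 3;
    }
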
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
   typedef FixedBodyDescriptor<kNameOffset,
@@ -3668,7 +3824,8 @@
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
   static const int kTryFullCodegen = 1;
   static const int kAllowLazyCompilation = 2;
-  static const int kCodeAgeShift = 3;
+  static const int kLiveObjectsMayExist = 3;
+  static const int kCodeAgeShift = 4;
   static const int kCodeAgeMask = 7;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
diff --git a/src/parser.cc b/src/parser.cc
index 11e2eb5..a3f469a 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -156,12 +156,12 @@
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
 
-  bool inside_with() const  { return with_nesting_level_ > 0; }
-  ParserFactory* factory() const  { return factory_; }
+  bool inside_with() const { return with_nesting_level_ > 0; }
+  ParserFactory* factory() const { return factory_; }
   ParserLog* log() const { return log_; }
   Scanner& scanner()  { return scanner_; }
-  Mode mode() const  { return mode_; }
-  ScriptDataImpl* pre_data() const  { return pre_data_; }
+  Mode mode() const { return mode_; }
+  ScriptDataImpl* pre_data() const { return pre_data_; }
 
   // All ParseXXX functions take as the last argument an *ok parameter
   // which is set to false if parsing failed; it is unchanged otherwise.
@@ -996,21 +996,23 @@
     int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
     if (id == 0) {
       // Put (symbol_id_ + 1) into entry and increment it.
-      symbol_id_++;
-      entry->value = reinterpret_cast<void*>(symbol_id_);
+      id = ++symbol_id_;
+      entry->value = reinterpret_cast<void*>(id);
       Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
       entry->key = &symbol[0];
-    } else {
-      // Log a reuse of an earlier seen symbol.
-      symbol_store_.Add(start);
-      symbol_store_.Add(id - 1);
     }
+    WriteNumber(id - 1);
   }
 
   virtual Vector<unsigned> ExtractData() {
     int function_size = function_store_.size();
+    // Add terminator to symbols, then pad to unsigned size.
     int symbol_size = symbol_store_.size();
-    int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size;
+    int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
+    symbol_store_.AddBlock(padding, ScriptDataImpl::kNumberTerminator);
+    symbol_size += padding;
+    int total_size = ScriptDataImpl::kHeaderSize + function_size
+        + (symbol_size / sizeof(unsigned));
     Vector<unsigned> data = Vector<unsigned>::New(total_size);
     preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size;
     preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_;
@@ -1020,8 +1022,9 @@
       function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize,
                                              symbol_start));
     }
-    if (symbol_size > 0) {
-      symbol_store_.WriteTo(data.SubVector(symbol_start, total_size));
+    if (!has_error()) {
+      symbol_store_.WriteTo(
+          Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
     }
     return data;
   }
@@ -1029,12 +1032,7 @@
   virtual int symbol_position() { return symbol_store_.size(); }
   virtual int symbol_ids() { return symbol_id_; }
  private:
-  Collector<unsigned> symbol_store_;
-  Collector<Vector<const char> > symbol_entries_;
-  HashMap symbol_table_;
-  int symbol_id_;
-
-  static int vector_hash(Vector<const char> string) {
+  static int vector_hash(Vector<const char> string) {
     int hash = 0;
     for (int i = 0; i < string.length(); i++) {
       int c = string[i];
@@ -1052,6 +1050,14 @@
     if (string2->length() != length) return false;
     return memcmp(string1->start(), string2->start(), length) == 0;
   }
+
+  // Write a non-negative number to the symbol store.
+  void WriteNumber(int number);
+
+  Collector<byte> symbol_store_;
+  Collector<Vector<const char> > symbol_entries_;
+  HashMap symbol_table_;
+  int symbol_id_;
 };
 
 
@@ -1076,18 +1082,11 @@
 }
 
 
-int ScriptDataImpl::GetSymbolIdentifier(int start) {
-  int next = symbol_index_ + 2;
-  if (next <= store_.length()
-      && static_cast<int>(store_[symbol_index_]) == start) {
-    symbol_index_ = next;
-    return store_[next - 1];
-  }
-  return symbol_id_++;
+int ScriptDataImpl::GetSymbolIdentifier() {
+  return ReadNumber(&symbol_data_);
 }
 
 
-
 bool ScriptDataImpl::SanityCheck() {
   // Check that the header data is valid and doesn't point
   // to positions outside the store.
@@ -1118,7 +1117,7 @@
   int symbol_count =
       static_cast<int>(store_[ScriptDataImpl::kSymbolCountOffset]);
   if (symbol_count < 0) return false;
-  // Check that the total size has room both function entries.
+  // Check that the total size has room for header and function entries.
   int minimum_size =
       ScriptDataImpl::kHeaderSize + functions_size;
   if (store_.length() < minimum_size) return false;
@@ -1158,6 +1157,22 @@
 }
 
 
+void CompleteParserRecorder::WriteNumber(int number) {
+  ASSERT(number >= 0);
+
+  int mask = (1 << 28) - 1;
+  for (int i = 28; i > 0; i -= 7) {
+    if (number > mask) {
+      symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
+      number &= mask;
+    }
+    mask >>= 7;
+  }
+  symbol_store_.Add(static_cast<byte>(number));
+}
+
+
 const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
   int length = start[0];
   char* result = NewArray<char>(length + 1);
@@ -1206,7 +1221,8 @@
 Vector<const char*> ScriptDataImpl::BuildArgs() {
   int arg_count = Read(kMessageArgCountPos);
   const char** array = NewArray<const char*>(arg_count);
-  // Position after the string starting at position 3.
+  // Position after the message text, found by skipping past the length
+  // field and the number of words it specifies.
   int pos = kMessageTextPos + 1 + Read(kMessageTextPos);
   for (int i = 0; i < arg_count; i++) {
     int count = 0;
@@ -1287,7 +1303,8 @@
  public:
   CompletePreParser(Handle<Script> script, bool allow_natives_syntax,
                     v8::Extension* extension)
-      : PreParser(script, allow_natives_syntax, extension, &recorder_) { }
+      : PreParser(script, allow_natives_syntax, extension, &recorder_),
+        recorder_() { }
   virtual PartialParserRecorder* recorder() { return &recorder_; }
  private:
   CompleteParserRecorder recorder_;
@@ -1298,7 +1315,8 @@
  public:
   PartialPreParser(Handle<Script> script, bool allow_natives_syntax,
                    v8::Extension* extension)
-      : PreParser(script, allow_natives_syntax, extension, &recorder_) { }
+      : PreParser(script, allow_natives_syntax, extension, &recorder_),
+        recorder_() { }
   virtual PartialParserRecorder* recorder() { return &recorder_; }
  private:
   PartialParserRecorder recorder_;
@@ -1439,7 +1457,7 @@
                ParserLog* log,
                ScriptDataImpl* pre_data)
     : script_(script),
-      scanner_(is_pre_parsing),
+      scanner_(),
       top_scope_(NULL),
       with_nesting_level_(0),
       temp_scope_(NULL),
@@ -1485,6 +1503,7 @@
   source->TryFlatten();
   scanner_.Initialize(source, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
+  if (pre_data_ != NULL) pre_data_->Initialize();
 
   // Compute the parsing mode.
   mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
@@ -1639,17 +1658,12 @@
 
 
 Handle<String> Parser::GetSymbol(bool* ok) {
-  if (pre_data() != NULL) {
-    int symbol_id =
-        pre_data()->GetSymbolIdentifier(scanner_.location().beg_pos);
-    if (symbol_id < 0) {
-      ReportInvalidPreparseData(Factory::empty_symbol(), ok);
-      return Handle<String>::null();
-    }
-    return factory()->LookupSymbol(symbol_id, scanner_.literal());
-  }
   log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
-  return factory()->LookupSymbol(-1, scanner_.literal());
+  int symbol_id = -1;
+  if (pre_data() != NULL) {
+    symbol_id = pre_data()->GetSymbolIdentifier();
+  }
+  return factory()->LookupSymbol(symbol_id, scanner_.literal());
 }
 
 
@@ -4176,8 +4190,7 @@
       Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
       scanner_.SeekForward(end_pos);
       pre_data()->Skip(entry.predata_function_skip(),
-                       entry.predata_symbol_skip(),
-                       entry.symbol_id_skip());
+                       entry.predata_symbol_skip());
       materialized_literal_count = entry.literal_count();
       expected_property_count = entry.property_count();
       only_simple_this_property_assignments = false;
@@ -4191,7 +4204,6 @@
       FunctionEntry entry = log()->LogFunction(function_block_pos);
       int predata_function_position_before = log()->function_position();
       int predata_symbol_position_before = log()->symbol_position();
-      int symbol_ids_before = log()->symbol_ids();
       ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
       materialized_literal_count = temp_scope.materialized_literal_count();
       expected_property_count = temp_scope.expected_property_count();
@@ -4209,8 +4221,6 @@
             log()->function_position() - predata_function_position_before);
         entry.set_predata_symbol_skip(
             log()->symbol_position() - predata_symbol_position_before);
-        entry.set_symbol_id_skip(
-            log()->symbol_ids() - symbol_ids_before);
       }
     }
 
@@ -4243,58 +4253,43 @@
 
   Expect(Token::MOD, CHECK_OK);
   Handle<String> name = ParseIdentifier(CHECK_OK);
-  Runtime::Function* function =
-      Runtime::FunctionForName(scanner_.literal());
   ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-  if (function == NULL && extension_ != NULL) {
+  if (is_pre_parsing_) return NULL;
+
+  if (extension_ != NULL) {
     // The extension structures are only accessible while parsing the
     // very first time not when reparsing because of lazy compilation.
     top_scope_->ForceEagerCompilation();
   }
 
-  // Check for built-in macros.
-  if (!is_pre_parsing_) {
-    if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
-      // %IS_VAR(x)
-      //   evaluates to x if x is a variable,
-      //   leads to a parse error otherwise
-      if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
-        return args->at(0);
-      }
-      *ok = false;
-    // Check here for other macros.
-    // } else if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
-    //   ...
-    }
+  Runtime::Function* function = Runtime::FunctionForSymbol(name);
 
-    if (!*ok) {
-      // We found a macro but it failed.
+  // Check for built-in IS_VAR macro.
+  if (function != NULL &&
+      function->intrinsic_type == Runtime::RUNTIME &&
+      function->function_id == Runtime::kIS_VAR) {
+    // %IS_VAR(x) evaluates to x if x is a variable,
+    // leads to a parse error otherwise.  Could be implemented as an
+    // inline function %_IS_VAR(x) to eliminate this special case.
+    if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
+      return args->at(0);
+    } else {
       ReportMessage("unable_to_parse", Vector<const char*>::empty());
-      return NULL;
-    }
-  }
-
-  // Check that the expected number arguments are passed to runtime functions.
-  if (!is_pre_parsing_) {
-    if (function != NULL
-        && function->nargs != -1
-        && function->nargs != args->length()) {
-      ReportMessage("illegal_access", Vector<const char*>::empty());
       *ok = false;
       return NULL;
-    } else if (function == NULL && !name.is_null()) {
-      // If this is not a runtime function implemented in C++ it might be an
-      // inlined runtime function.
-      int argc = CodeGenerator::InlineRuntimeCallArgumentsCount(name);
-      if (argc != -1 && argc != args->length()) {
-        ReportMessage("illegal_access", Vector<const char*>::empty());
-        *ok = false;
-        return NULL;
-      }
     }
   }
 
-  // Otherwise we have a valid runtime call.
+  // Check that the expected number of arguments are being passed.
+  if (function != NULL &&
+      function->nargs != -1 &&
+      function->nargs != args->length()) {
+    ReportMessage("illegal_access", Vector<const char*>::empty());
+    *ok = false;
+    return NULL;
+  }
+
+  // We have a valid intrinsics call or a call to a builtin.
   return NEW(CallRuntime(name, function, args));
 }
 
@@ -5497,6 +5492,49 @@
 }
 
 
+void ScriptDataImpl::Initialize() {
+  // Prepares state for use.
+  if (store_.length() >= kHeaderSize) {
+    function_index_ = kHeaderSize;
+    int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
+    if (store_.length() > symbol_data_offset) {
+      symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
+    } else {
+      // A partial preparse produces no symbol information.
+      symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
+    }
+    symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
+  }
+}
+
+
+int ScriptDataImpl::ReadNumber(byte** source) {
+  // Reads a number from symbol_data_ in base 128. The most significant
+  // bit marks that there are more digits.
+  // If the first byte is 0x80 (kNumberTerminator), it would normally
+  // represent a leading zero. Since that is useless, and therefore won't
+  // appear as the first digit of any actual value, it is used to
+  // mark the end of the input stream.
+  byte* data = *source;
+  if (data >= symbol_data_end_) return -1;
+  byte input = *data;
+  if (input == kNumberTerminator) {
+    // End of stream marker.
+    return -1;
+  }
+  int result = input & 0x7f;
+  data++;
+  while ((input & 0x80u) != 0) {
+    if (data >= symbol_data_end_) return -1;
+    input = *data;
+    result = (result << 7) | (input & 0x7f);
+    data++;
+  }
+  *source = data;
+  return result;
+}
+
+
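
To make the encoding concrete: WriteNumber(300) emits the bytes 0x82 0x2C, since 300 = 2 * 128 + 44 and the high bit of 0x82 flags a continuation; ReadNumber folds them back as (2 << 7) | 44 == 300. A standalone sketch of the same scheme (illustrative names; std::vector stands in for the Collector):

    #include <stdint.h>
    #include <vector>

    // Base-128, most significant digits first; the high bit marks a
    // continuation byte, and a leading 0x80 is reserved as the stream
    // terminator (it would otherwise be a useless leading zero).
    void WriteVarint(std::vector<uint8_t>* out, int number) {
      int mask = (1 << 28) - 1;
      for (int i = 28; i > 0; i -= 7) {
        if (number > mask) {
          out->push_back(static_cast<uint8_t>((number >> i) | 0x80));
          number &= mask;
        }
        mask >>= 7;
      }
      out->push_back(static_cast<uint8_t>(number));  // high bit clear: done
    }
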
 ScriptDataImpl* PreParse(Handle<String> source,
                          unibrow::CharacterStream* stream,
                          v8::Extension* extension) {
diff --git a/src/parser.h b/src/parser.h
index c3e947f..8c00857 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -82,15 +82,9 @@
     backing_[kPredataSymbolSkipOffset] = value;
   }
 
-  int symbol_id_skip() { return backing_[kSymbolIdSkipOffset]; }
-  void set_symbol_id_skip(int value) {
-    backing_[kSymbolIdSkipOffset] = value;
-  }
-
-
   bool is_valid() { return backing_.length() > 0; }
 
-  static const int kSize = 7;
+  static const int kSize = 6;
 
  private:
   Vector<unsigned> backing_;
@@ -100,7 +94,6 @@
   static const int kPropertyCountOffset = 3;
   static const int kPredataFunctionSkipOffset = 4;
   static const int kPredataSymbolSkipOffset = 5;
-  static const int kSymbolIdSkipOffset = 6;
 };
 
 
@@ -108,18 +101,7 @@
  public:
   explicit ScriptDataImpl(Vector<unsigned> store)
       : store_(store),
-        function_index_(kHeaderSize),
-        symbol_id_(0),
-        owns_store_(true) {
-    Initialize();
-  }
-
-  void Initialize() {
-    if (store_.length() >= kHeaderSize) {
-      // Otherwise we won't satisfy the SanityCheck.
-      symbol_index_ = kHeaderSize + store_[kFunctionsSizeOffset];
-    }
-  }
+        owns_store_(true) { }
 
   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.
@@ -130,8 +112,11 @@
   virtual const char* Data();
   virtual bool HasError();
 
+  void Initialize();
+  void ReadNextSymbolPosition();
+
   FunctionEntry GetFunctionEntry(int start);
-  int GetSymbolIdentifier(int start);
+  int GetSymbolIdentifier();
   void SkipFunctionEntry(int start);
   bool SanityCheck();
 
@@ -149,19 +134,27 @@
   unsigned version() { return store_[kVersionOffset]; }
 
   // Skip forward in the preparser data by the given number
-  // of unsigned ints.
-  virtual void Skip(int function_entries, int symbol_entries, int symbol_ids) {
+  // of unsigned ints of function entries and the given number of bytes of
+  // symbol id encoding.
+  void Skip(int function_entries, int symbol_entries) {
     ASSERT(function_entries >= 0);
     ASSERT(function_entries
            <= (static_cast<int>(store_[kFunctionsSizeOffset])
                - (function_index_ - kHeaderSize)));
-    function_index_ += function_entries;
-    symbol_index_ += symbol_entries;
-    symbol_id_ += symbol_ids;
+    ASSERT(symbol_entries >= 0);
+    ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);
+
+    unsigned max_function_skip = store_[kFunctionsSizeOffset] -
+        static_cast<unsigned>(function_index_ - kHeaderSize);
+    function_index_ +=
+        Min(static_cast<unsigned>(function_entries), max_function_skip);
+    symbol_data_ +=
+        Min(static_cast<unsigned>(symbol_entries),
+            static_cast<unsigned>(symbol_data_end_ - symbol_data_));
   }
 
   static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 2;
+  static const unsigned kCurrentVersion = 3;
 
   static const int kMagicOffset = 0;
   static const int kVersionOffset = 1;
@@ -171,29 +164,31 @@
   static const int kSizeOffset = 5;
   static const int kHeaderSize = 6;
 
+  // If encoding a message, the following positions are fixed.
   static const int kMessageStartPos = 0;
   static const int kMessageEndPos = 1;
   static const int kMessageArgCountPos = 2;
   static const int kMessageTextPos = 3;
 
+  static const byte kNumberTerminator = 0x80u;
+
  private:
   Vector<unsigned> store_;
+  unsigned char* symbol_data_;
+  unsigned char* symbol_data_end_;
   int function_index_;
-  int symbol_index_;
-  int symbol_id_;
   bool owns_store_;
 
   unsigned Read(int position);
   unsigned* ReadAddress(int position);
+  // Reads a number from the current symbol data position.
+  int ReadNumber(byte** source);
 
   ScriptDataImpl(const char* backing_store, int length)
       : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
                length / sizeof(unsigned)),
-        function_index_(kHeaderSize),
-        symbol_id_(0),
         owns_store_(false) {
     ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
-    Initialize();
   }
 
   // Read strings written by ParserRecorder::WriteString.
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index cef825d..cdfa9e2 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -46,8 +46,7 @@
 
 
 CodeEntry::CodeEntry(int security_token_id)
-    : call_uid_(0),
-      tag_(Logger::FUNCTION_TAG),
+    : tag_(Logger::FUNCTION_TAG),
       name_prefix_(kEmptyNamePrefix),
       name_(""),
       resource_name_(""),
@@ -62,8 +61,7 @@
                      const char* resource_name,
                      int line_number,
                      int security_token_id)
-    : call_uid_(next_call_uid_++),
-      tag_(tag),
+    : tag_(tag),
       name_prefix_(name_prefix),
       name_(name),
       resource_name_(resource_name),
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2de7a2f..525dea2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -31,6 +31,7 @@
 #include "global-handles.h"
 #include "scopeinfo.h"
 #include "top.h"
+#include "unicode.h"
 #include "zone-inl.h"
 
 #include "profile-generator-inl.h"
@@ -120,11 +121,9 @@
 
 
 const char* CodeEntry::kEmptyNamePrefix = "";
-unsigned CodeEntry::next_call_uid_ = 1;
 
 
 void CodeEntry::CopyData(const CodeEntry& source) {
-  call_uid_ = source.call_uid_;
   tag_ = source.tag_;
   name_prefix_ = source.name_prefix_;
   name_ = source.name_;
@@ -133,6 +132,29 @@
 }
 
 
+uint32_t CodeEntry::GetCallUid() const {
+  uint32_t hash = ComputeIntegerHash(tag_);
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+  hash ^= ComputeIntegerHash(line_number_);
+  return hash;
+}
+
+
+bool CodeEntry::IsSameAs(CodeEntry* entry) const {
+  return this == entry
+      || (tag_ == entry->tag_
+          && name_prefix_ == entry->name_prefix_
+          && name_ == entry->name_
+          && resource_name_ == entry->resource_name_
+          && line_number_ == entry->line_number_);
+}
+
+
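
Replacing the monotonically increasing call_uid_ with a content hash only works if hashing and equality stay consistent, since the ProfileTree hash map (see src/profile-generator.h below) uses GetCallUid and IsSameAs as its hash and equality functions. A hypothetical assertion of that contract, not part of the patch:

    // Entries that aggregate into the same profile node must collide.
    void CheckCallUidContract(CodeEntry* a, CodeEntry* b) {
      if (a->IsSameAs(b)) {
        ASSERT(a->GetCallUid() == b->GetCallUid());
      }
    }
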
 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
   HashMap::Entry* map_entry =
       children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -423,9 +445,10 @@
   CodeTree::Locator locator;
   if (tree_.Find(code_start, &locator)) {
     const CodeEntryInfo& code_info = locator.value();
-    entry->CopyData(*code_info.entry);
-    tree_.Insert(start, &locator);
-    locator.set_value(CodeEntryInfo(entry, code_info.size));
+    if (tree_.Insert(start, &locator)) {
+      entry->CopyData(*code_info.entry);
+      locator.set_value(CodeEntryInfo(entry, code_info.size));
+    }
   }
 }
 
@@ -2132,6 +2155,357 @@
   return diff;
 }
 
+
+class OutputStreamWriter {
+ public:
+  explicit OutputStreamWriter(v8::OutputStream* stream)
+      : stream_(stream),
+        chunk_size_(stream->GetChunkSize()),
+        chunk_(chunk_size_),
+        chunk_pos_(0),
+        aborted_(false) {
+    ASSERT(chunk_size_ > 0);
+  }
+  bool aborted() { return aborted_; }
+  void AddCharacter(char c) {
+    ASSERT(c != '\0');
+    ASSERT(chunk_pos_ < chunk_size_);
+    chunk_[chunk_pos_++] = c;
+    MaybeWriteChunk();
+  }
+  void AddString(const char* s) {
+    AddSubstring(s, StrLength(s));
+  }
+  void AddSubstring(const char* s, int n) {
+    if (n <= 0) return;
+    ASSERT(static_cast<size_t>(n) <= strlen(s));
+    const char* s_end = s + n;
+    while (s < s_end) {
+      int s_chunk_size = Min(
+          chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
+      ASSERT(s_chunk_size > 0);
+      memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+      s += s_chunk_size;
+      chunk_pos_ += s_chunk_size;
+      MaybeWriteChunk();
+    }
+  }
+  void AddNumber(int n) { AddNumberImpl<int>(n, "%d"); }
+  void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
+  void AddNumber(uint64_t n) { AddNumberImpl<uint64_t>(n, "%llu"); }
+  void Finalize() {
+    if (aborted_) return;
+    ASSERT(chunk_pos_ < chunk_size_);
+    if (chunk_pos_ != 0) {
+      WriteChunk();
+    }
+    stream_->EndOfStream();
+  }
+
+ private:
+  template<typename T>
+  void AddNumberImpl(T n, const char* format) {
+    ScopedVector<char> buffer(32);
+    int result = OS::SNPrintF(buffer, format, n);
+    USE(result);
+    ASSERT(result != -1);
+    AddString(buffer.start());
+  }
+  void MaybeWriteChunk() {
+    ASSERT(chunk_pos_ <= chunk_size_);
+    if (chunk_pos_ == chunk_size_) {
+      WriteChunk();
+      chunk_pos_ = 0;
+    }
+  }
+  void WriteChunk() {
+    if (aborted_) return;
+    if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
+        v8::OutputStream::kAbort) aborted_ = true;
+  }
+
+  v8::OutputStream* stream_;
+  int chunk_size_;
+  ScopedVector<char> chunk_;
+  int chunk_pos_;
+  bool aborted_;
+};
+
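
A minimal sink for this writer, assuming the public v8::OutputStream interface the code above programs against (kAbort appears above; kContinue and the exact WriteResult spelling are taken from the public profiler header and should be checked against this revision). It simply accumulates chunks into a std::string:

    #include <string>
    // Assumes the v8 header that declares v8::OutputStream is included.

    class StringOutputStream : public v8::OutputStream {
     public:
      virtual int GetChunkSize() { return 1024; }  // writer buffers this much
      virtual void EndOfStream() { }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        buffer_.append(data, size);
        return kContinue;  // kAbort would make the writer stop early
      }
      const std::string& buffer() const { return buffer_; }
     private:
      std::string buffer_;
    };

Usage would then be along the lines of: construct a HeapSnapshotJSONSerializer over a snapshot, call Serialize(&stream), and read stream.buffer() once EndOfStream has been signalled.
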
+void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+  ASSERT(writer_ == NULL);
+  writer_ = new OutputStreamWriter(stream);
+
+  // Since the node graph is cyclic, we need a first pass to enumerate
+  // the nodes. Strings can be serialized in one pass.
+  EnumerateNodes();
+  SerializeImpl();
+
+  delete writer_;
+  writer_ = NULL;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeImpl() {
+  writer_->AddCharacter('{');
+  writer_->AddString("\"snapshot\":{");
+  SerializeSnapshot();
+  if (writer_->aborted()) return;
+  writer_->AddString("},\n");
+  writer_->AddString("\"nodes\":[");
+  SerializeNodes();
+  if (writer_->aborted()) return;
+  writer_->AddString("],\n");
+  writer_->AddString("\"strings\":[");
+  SerializeStrings();
+  if (writer_->aborted()) return;
+  writer_->AddCharacter(']');
+  writer_->AddCharacter('}');
+  writer_->Finalize();
+}
+
+
+class HeapSnapshotJSONSerializerEnumerator {
+ public:
+  explicit HeapSnapshotJSONSerializerEnumerator(HeapSnapshotJSONSerializer* s)
+      : s_(s) {
+  }
+  void Apply(HeapEntry** entry) {
+    s_->GetNodeId(*entry);
+  }
+ private:
+  HeapSnapshotJSONSerializer* s_;
+};
+
+void HeapSnapshotJSONSerializer::EnumerateNodes() {
+  GetNodeId(snapshot_->root());  // Make sure root gets the first id.
+  HeapSnapshotJSONSerializerEnumerator iter(this);
+  snapshot_->IterateEntries(&iter);
+}
+
+
+int HeapSnapshotJSONSerializer::GetNodeId(HeapEntry* entry) {
+  HashMap::Entry* cache_entry = nodes_.Lookup(entry, ObjectHash(entry), true);
+  if (cache_entry->value == NULL) {
+    cache_entry->value = reinterpret_cast<void*>(next_node_id_++);
+  }
+  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
+  HashMap::Entry* cache_entry = strings_.Lookup(
+      const_cast<char*>(s), ObjectHash(s), true);
+  if (cache_entry->value == NULL) {
+    cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
+  }
+  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
+  writer_->AddCharacter(',');
+  writer_->AddNumber(edge->type());
+  writer_->AddCharacter(',');
+  if (edge->type() == HeapGraphEdge::kElement) {
+    writer_->AddNumber(edge->index());
+  } else {
+    writer_->AddNumber(GetStringId(edge->name()));
+  }
+  writer_->AddCharacter(',');
+  writer_->AddNumber(GetNodeId(edge->to()));
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+  writer_->AddCharacter('\n');
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->type());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(GetStringId(entry->name()));
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->id());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->self_size());
+  Vector<HeapGraphEdge> children = entry->children();
+  writer_->AddCharacter(',');
+  writer_->AddNumber(children.length());
+  for (int i = 0; i < children.length(); ++i) {
+    SerializeEdge(&children[i]);
+    if (writer_->aborted()) return;
+  }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+  // The first (zero) item of the nodes array is a JSON-ified object
+  // describing node serialization layout.
+  // We use a set of macros to improve readability.
+#define JSON_A(s) "["s"]"
+#define JSON_O(s) "{"s"}"
+#define JSON_S(s) "\\\""s"\\\""
+  writer_->AddString("\"" JSON_O(
+    JSON_S("fields") ":" JSON_A(
+        JSON_S("type")
+        "," JSON_S("name")
+        "," JSON_S("id")
+        "," JSON_S("self_size")
+        "," JSON_S("children_count")
+        "," JSON_S("children"))
+    "," JSON_S("types") ":" JSON_A(
+        JSON_A(
+            JSON_S("internal")
+            "," JSON_S("array")
+            "," JSON_S("string")
+            "," JSON_S("object")
+            "," JSON_S("code")
+            "," JSON_S("closure"))
+        "," JSON_S("string")
+        "," JSON_S("number")
+        "," JSON_S("number")
+        "," JSON_S("number")
+        "," JSON_O(
+            JSON_S("fields") ":" JSON_A(
+                JSON_S("type")
+                "," JSON_S("name_or_index")
+                "," JSON_S("to_node"))
+            "," JSON_S("types") ":" JSON_A(
+                JSON_A(
+                    JSON_S("context")
+                    "," JSON_S("element")
+                    "," JSON_S("property")
+                    "," JSON_S("internal"))
+                "," JSON_S("string_or_number")
+                "," JSON_S("node"))))) "\"");
+#undef JSON_S
+#undef JSON_O
+#undef JSON_A
+
+  const int node_fields_count = 5;  // type,name,id,self_size,children_count.
+  const int edge_fields_count = 3;  // type,name|index,to_node.
+  List<HashMap::Entry*> sorted_nodes;
+  SortHashMap(&nodes_, &sorted_nodes);
+  // Rewrite node ids so they refer to actual array positions.
+  if (sorted_nodes.length() > 1) {
+    // Nodes start from array index 1.
+    int prev_value = 1;
+    sorted_nodes[0]->value = reinterpret_cast<void*>(prev_value);
+    for (int i = 1; i < sorted_nodes.length(); ++i) {
+      HeapEntry* prev_heap_entry =
+          reinterpret_cast<HeapEntry*>(sorted_nodes[i-1]->key);
+      prev_value += node_fields_count +
+          prev_heap_entry->children().length() * edge_fields_count;
+      sorted_nodes[i]->value = reinterpret_cast<void*>(prev_value);
+    }
+  }
+  for (int i = 0; i < sorted_nodes.length(); ++i) {
+    SerializeNode(reinterpret_cast<HeapEntry*>(sorted_nodes[i]->key));
+    if (writer_->aborted()) return;
+  }
+}
+
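
Concretely, the emitted document has this shape (whitespace added for readability and values illustrative; the first nodes entry is the escaped layout-description string built by the macros above, followed by flat comma-separated runs of numbers, one run per node with its edges inlined):

    {"snapshot":{"title":"...","uid":...},
     "nodes":["{\"fields\":[\"type\",\"name\",...],...}"
              ,type,name_id,id,self_size,children_count
              ,edge_type,name_or_index,to_node,...],
     "strings":["<dummy>","..."]}
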
+
+void HeapSnapshotJSONSerializer::SerializeSnapshot() {
+  writer_->AddString("\"title\":\"");
+  writer_->AddString(snapshot_->title());
+  writer_->AddString("\"");
+  writer_->AddString(",\"uid\":");
+  writer_->AddNumber(snapshot_->uid());
+}
+
+
+static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
+  static const char hex_chars[] = "0123456789ABCDEF";
+  w->AddString("\\u");
+  w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
+  w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
+  w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
+  w->AddCharacter(hex_chars[u & 0xf]);
+}
+
+void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
+  writer_->AddCharacter('\n');
+  writer_->AddCharacter('\"');
+  for ( ; *s != '\0'; ++s) {
+    switch (*s) {
+      case '\b':
+        writer_->AddString("\\b");
+        continue;
+      case '\f':
+        writer_->AddString("\\f");
+        continue;
+      case '\n':
+        writer_->AddString("\\n");
+        continue;
+      case '\r':
+        writer_->AddString("\\r");
+        continue;
+      case '\t':
+        writer_->AddString("\\t");
+        continue;
+      case '\"':
+      case '\\':
+        writer_->AddCharacter('\\');
+        writer_->AddCharacter(*s);
+        continue;
+      default:
+        if (*s > 31 && *s < 128) {
+          writer_->AddCharacter(*s);
+        } else if (*s <= 31) {
+          // Special character with no dedicated literal.
+          WriteUChar(writer_, *s);
+        } else {
+          // Convert UTF-8 into \u UTF-16 literal.
+          unsigned length = 1, cursor = 0;
+          for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
+          unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
+          if (c != unibrow::Utf8::kBadChar) {
+            WriteUChar(writer_, c);
+            ASSERT(cursor != 0);
+            s += cursor - 1;
+          } else {
+            writer_->AddCharacter('?');
+          }
+        }
+    }
+  }
+  writer_->AddCharacter('\"');
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeStrings() {
+  List<HashMap::Entry*> sorted_strings;
+  SortHashMap(&strings_, &sorted_strings);
+  writer_->AddString("\"<dummy>\"");
+  for (int i = 0; i < sorted_strings.length(); ++i) {
+    writer_->AddCharacter(',');
+    SerializeString(
+        reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+    if (writer_->aborted()) return;
+  }
+}
+
+
+template<typename T>
+inline static int SortUsingEntryValue(const T* x, const T* y) {
+  uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
+  uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
+  if (x_uint > y_uint) {
+    return 1;
+  } else if (x_uint == y_uint) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+
+void HeapSnapshotJSONSerializer::SortHashMap(
+    HashMap* map, List<HashMap::Entry*>* sorted_entries) {
+  for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
+    sorted_entries->Add(p);
+  sorted_entries->Sort(SortUsingEntryValue);
+}
+
 } }  // namespace v8::internal
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c6d6f4c..1e949a2 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -100,17 +100,17 @@
   INLINE(const char* name() const) { return name_; }
   INLINE(const char* resource_name() const) { return resource_name_; }
   INLINE(int line_number() const) { return line_number_; }
-  INLINE(unsigned call_uid() const) { return call_uid_; }
   INLINE(int security_token_id() const) { return security_token_id_; }
 
   INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
 
   void CopyData(const CodeEntry& source);
+  uint32_t GetCallUid() const;
+  bool IsSameAs(CodeEntry* entry) const;
 
   static const char* kEmptyNamePrefix;
 
  private:
-  unsigned call_uid_;
   Logger::LogEventsAndTags tag_;
   const char* name_prefix_;
   const char* name_;
@@ -118,8 +118,6 @@
   int line_number_;
   int security_token_id_;
 
-  static unsigned next_call_uid_;
-
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };
 
@@ -147,11 +145,12 @@
 
  private:
   INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
-    return entry1 == entry2;
+    return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
+        reinterpret_cast<CodeEntry*>(entry2));
   }
 
   INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
+    return entry->GetCallUid();
   }
 
   ProfileTree* tree_;
@@ -746,7 +745,8 @@
   }
 
   static uint32_t AddressHash(Address addr) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
   }
 
   bool initial_fill_mode_;
@@ -889,7 +889,8 @@
   };
 
   uint32_t Hash(HeapObject* object) {
-    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object)));
   }
   static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
 
@@ -976,6 +977,55 @@
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
 };
 
+class OutputStreamWriter;
+
+class HeapSnapshotJSONSerializer {
+ public:
+  explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
+      : snapshot_(snapshot),
+        nodes_(ObjectsMatch),
+        strings_(ObjectsMatch),
+        next_node_id_(1),
+        next_string_id_(1),
+        writer_(NULL) {
+  }
+  void Serialize(v8::OutputStream* stream);
+
+ private:
+  INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
+    return key1 == key2;
+  }
+
+  INLINE(static uint32_t ObjectHash(const void* key)) {
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
+  }
+
+  void EnumerateNodes();
+  int GetNodeId(HeapEntry* entry);
+  int GetStringId(const char* s);
+  void SerializeEdge(HeapGraphEdge* edge);
+  void SerializeImpl();
+  void SerializeNode(HeapEntry* entry);
+  void SerializeNodes();
+  void SerializeSnapshot();
+  void SerializeString(const unsigned char* s);
+  void SerializeStrings();
+  void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
+
+  HeapSnapshot* snapshot_;
+  HashMap nodes_;
+  HashMap strings_;
+  int next_node_id_;
+  int next_string_id_;
+  OutputStreamWriter* writer_;
+
+  friend class HeapSnapshotJSONSerializerEnumerator;
+  friend class HeapSnapshotJSONSerializerIterator;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
+};
+
 } }  // namespace v8::internal
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/regexp.js b/src/regexp.js
index 566a96c..faa525d 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -186,6 +186,10 @@
       %_IsRegExpEquivalent(cache.regExp, this) &&
       %_ObjectEquals(cache.subject, string)) {
     if (cache.answerSaved) {
+      // If this regexp is not global, cache.lastIndex is zero, so we only get
+      // here if this.lastIndex is zero, and the resulting this.lastIndex
+      // must be zero too, so no change is necessary.
+      if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
       return %_RegExpCloneResult(cache.answer);
     } else {
       saveAnswer = true;
@@ -282,6 +286,10 @@
       %_IsRegExpEquivalent(cache.regExp, this) &&
       %_ObjectEquals(cache.subject, string) &&
       %_ObjectEquals(cache.lastIndex, lastIndex)) {
+    // If this regexp is not global, cache.lastIndex is zero, so we only get
+    // here if this.lastIndex is zero, and the resulting this.lastIndex
+    // must be zero too, so no change is necessary.
+    if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
     return cache.answer;
   }
 
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 4ddf1bf..f253ec5 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -525,8 +525,8 @@
         Variable* rvar = rvar_proxy->AsVariable();
         if (lvar != NULL && rvar != NULL) {
           if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
-            Slot* lslot = lvar->slot();
-            Slot* rslot = rvar->slot();
+            Slot* lslot = lvar->AsSlot();
+            Slot* rslot = rvar->AsSlot();
             if (lslot->type() == rslot->type() &&
                 (lslot->type() == Slot::PARAMETER ||
                  lslot->type() == Slot::LOCAL) &&
@@ -692,7 +692,7 @@
   }
 
   void Process(ZoneList<Statement*>* statements);
-  bool result_assigned() const  { return result_assigned_; }
+  bool result_assigned() const { return result_assigned_; }
 
  private:
   VariableProxy* result_;
diff --git a/src/runtime.cc b/src/runtime.cc
index a1f6810..8d58db7 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -638,56 +638,78 @@
   Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
   Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
   LookupResult result;
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
 
   // This could be an element.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    if (!obj->HasLocalElement(index)) {
-      return Heap::undefined_value();
-    }
+    switch (obj->HasLocalElement(index)) {
+      case JSObject::UNDEFINED_ELEMENT:
+        return Heap::undefined_value();
 
-    // Special handling of string objects according to ECMAScript 5 15.5.5.2.
-    // Note that this might be a string object with elements other than the
-    // actual string value. This is covered by the subsequent cases.
-    if (obj->IsStringObjectWithCharacterAt(index)) {
-      JSValue* js_value = JSValue::cast(obj);
-      String* str = String::cast(js_value->value());
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, str->SubString(index, index+1));
-      elms->set(WRITABLE_INDEX, Heap::false_value());
-      elms->set(ENUMERABLE_INDEX,  Heap::false_value());
-      elms->set(CONFIGURABLE_INDEX, Heap::false_value());
-      return *desc;
-    }
+      case JSObject::STRING_CHARACTER_ELEMENT: {
+        // Special handling of string objects according to ECMAScript 5
+        // 15.5.5.2. Note that this might be a string object with elements
+        // other than the actual string value. This is covered by the
+        // subsequent cases.
+        Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
+        Handle<String> str(String::cast(js_value->value()));
+        Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
 
-    // This can potentially be an element in the elements dictionary or
-    // a fast element.
-    if (obj->HasDictionaryElements()) {
-      NumberDictionary* dictionary = obj->element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      PropertyDetails details = dictionary->DetailsAt(entry);
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
-      elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
-      elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
-      elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
-      return *desc;
-    } else {
-      // Elements that are stored as array elements always has:
-      // writable: true, configurable: true, enumerable: true.
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, obj->GetElement(index));
-      elms->set(WRITABLE_INDEX, Heap::true_value());
-      elms->set(ENUMERABLE_INDEX,  Heap::true_value());
-      elms->set(CONFIGURABLE_INDEX, Heap::true_value());
-      return *desc;
+        elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+        elms->set(VALUE_INDEX, *substr);
+        elms->set(WRITABLE_INDEX, Heap::false_value());
+        elms->set(ENUMERABLE_INDEX, Heap::false_value());
+        elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+        return *desc;
+      }
+
+      case JSObject::INTERCEPTED_ELEMENT:
+      case JSObject::FAST_ELEMENT: {
+        elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+        Handle<Object> element = GetElement(Handle<Object>(obj), index);
+        elms->set(VALUE_INDEX, *element);
+        elms->set(WRITABLE_INDEX, Heap::true_value());
+        elms->set(ENUMERABLE_INDEX, Heap::true_value());
+        elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+        return *desc;
+      }
+
+      case JSObject::DICTIONARY_ELEMENT: {
+        NumberDictionary* dictionary = obj->element_dictionary();
+        int entry = dictionary->FindEntry(index);
+        ASSERT(entry != NumberDictionary::kNotFound);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        switch (details.type()) {
+          case CALLBACKS: {
+            // This is an accessor property with getter and/or setter.
+            FixedArray* callbacks =
+                FixedArray::cast(dictionary->ValueAt(entry));
+            elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+            elms->set(GETTER_INDEX, callbacks->get(0));
+            elms->set(SETTER_INDEX, callbacks->get(1));
+            break;
+          }
+          case NORMAL:
+            // This is a data property.
+            elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+            elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+            elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+        elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
+        elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+        return *desc;
+      }
     }
   }
 
   // Use recursive implementation to also traverse hidden prototypes
-  GetOwnPropertyImplementation(obj, name, &result);
+  GetOwnPropertyImplementation(*obj, *name, &result);
 
   if (!result.IsProperty()) {
     return Heap::undefined_value();
@@ -698,7 +720,8 @@
       // Property that is internally implemented as a callback or
       // an API defined callback.
       Object* value = obj->GetPropertyWithCallback(
-          obj, structure, name, result.holder());
+          *obj, structure, *name, result.holder());
+      if (value->IsFailure()) return value;
       elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
       elms->set(VALUE_INDEX, value);
       elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
@@ -946,7 +969,7 @@
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode =
       static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
-  ASSERT(mode == READ_ONLY || mode == NONE);
+  RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3]);
 
   // Declarations are always done in the function context.
@@ -2837,7 +2860,8 @@
   for (int i = 0; i < matches ; i++) {
     int from = offsets.at(i * 2);
     int to = offsets.at(i * 2 + 1);
-    elements->set(i, *Factory::NewSubString(subject, from, to));
+    Handle<String> match = Factory::NewSubString(subject, from, to);
+    elements->set(i, *match);
   }
   Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(matches));
@@ -3105,9 +3129,10 @@
         // Arguments array to replace function is match, captures, index and
         // subject, i.e., 3 + capture count in total.
         Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
-        elements->set(0, *Factory::NewSubString(subject,
-                                                match_start,
-                                                match_end));
+        Handle<String> match = Factory::NewSubString(subject,
+                                                     match_start,
+                                                     match_end);
+        elements->set(0, *match);
         for (int i = 1; i <= capture_count; i++) {
           int start = register_vector[i * 2];
           if (start >= 0) {
@@ -4519,7 +4544,6 @@
   RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
   double value = StringToInt(s, radix);
   return Heap::NumberFromDouble(value);
-  return Heap::nan_value();
 }
 
 
@@ -4954,12 +4978,14 @@
                                                             length);
 
     for (int i = num_copied_from_cache; i < length; ++i) {
-      elements->set(i, *LookupSingleCharacterStringFromCode(chars[i]));
+      Handle<Object> str = LookupSingleCharacterStringFromCode(chars[i]);
+      elements->set(i, *str);
     }
   } else {
     elements = Factory::NewFixedArray(length);
     for (int i = 0; i < length; ++i) {
-      elements->set(i, *LookupSingleCharacterStringFromCode(s->Get(i)));
+      Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+      elements->set(i, *str);
     }
   }
 
@@ -6280,7 +6306,7 @@
 }
 
 
-static Code* ComputeConstructStub(Handle<JSFunction> function) {
+static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
   Handle<Object> prototype = Factory::null_value();
   if (function->has_instance_prototype()) {
     prototype = Handle<Object>(function->instance_prototype());
@@ -6288,13 +6314,10 @@
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
     ConstructStubCompiler compiler;
     Object* code = compiler.CompileConstructStub(function->shared());
-    if (code->IsFailure()) {
-      return Builtins::builtin(Builtins::JSConstructStubGeneric);
+    if (!code->IsFailure()) {
+      function->shared()->set_construct_stub(Code::cast(code));
     }
-    return Code::cast(code);
   }
-
-  return function->shared()->construct_stub();
 }
 
 
@@ -6351,12 +6374,20 @@
   Handle<SharedFunctionInfo> shared(function->shared());
   EnsureCompiled(shared, CLEAR_EXCEPTION);
 
-  bool first_allocation = !function->has_initial_map();
+  if (!function->has_initial_map() &&
+      shared->IsInobjectSlackTrackingInProgress()) {
+    // The tracking is already in progress for another function. We can only
+    // track one initial_map at a time, so we force the completion before the
+    // function is called as a constructor for the first time.
+    shared->CompleteInobjectSlackTracking();
+    TrySettingInlineConstructStub(function);
+  }
+
+  bool first_allocation = !shared->live_objects_may_exist();
   Handle<JSObject> result = Factory::NewJSObject(function);
-  if (first_allocation) {
-    Handle<Code> stub = Handle<Code>(
-        ComputeConstructStub(Handle<JSFunction>(function)));
-    shared->set_construct_stub(*stub);
+  // Delay setting the stub if inobject slack tracking is in progress.
+  if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
+    TrySettingInlineConstructStub(function);
   }
 
   Counters::constructed_objects.Increment();
@@ -6366,6 +6397,18 @@
 }
 
 
+static Object* Runtime_FinalizeInstanceSize(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  function->shared()->CompleteInobjectSlackTracking();
+  TrySettingInlineConstructStub(function);
+
+  return Heap::undefined_value();
+}
+
+
 static Object* Runtime_LazyCompile(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -7499,14 +7542,18 @@
     // The backing storage array must have non-existing elements to
     // preserve holes across concat operations.
     storage = Factory::NewFixedArrayWithHoles(result_length);
-    result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
+    Handle<Map> fast_map =
+        Factory::GetFastElementsMap(Handle<Map>(result->map()));
+    result->set_map(*fast_map);
   } else {
     // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
     uint32_t at_least_space_for = estimate_nof_elements +
                                   (estimate_nof_elements >> 2);
     storage = Handle<FixedArray>::cast(
                   Factory::NewNumberDictionary(at_least_space_for));
-    result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
+    Handle<Map> slow_map =
+        Factory::GetSlowElementsMap(Handle<Map>(result->map()));
+    result->set_map(*slow_map);
   }
 
   Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -7827,7 +7874,8 @@
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
     Handle<FixedArray> details = Factory::NewFixedArray(2);
-    details->set(0, Runtime::GetElementOrCharAt(obj, index));
+    Object* element_or_char = Runtime::GetElementOrCharAt(obj, index);
+    details->set(0, element_or_char);
     details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
     return *Factory::NewJSArrayWithElements(details);
   }
@@ -8629,7 +8677,8 @@
 
   // Fill in scope details.
   details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
-  details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+  Handle<JSObject> scope_object = it.ScopeObject();
+  details->set(kScopeDetailsObjectIndex, *scope_object);
 
   return *Factory::NewJSArrayWithElements(details);
 }
@@ -8674,10 +8723,10 @@
   Handle<FixedArray> frames_array = Factory::NewFixedArray(frames_count);
   for (int i = 0; i < frames_count; i++) {
     Handle<JSObject> frame_value = Factory::NewJSObject(Top::object_function());
-    frame_value->SetProperty(
-        *address_str,
-        *Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address)),
-        NONE);
+    Handle<Object> frame_address =
+        Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address));
+
+    frame_value->SetProperty(*address_str, *frame_address, NONE);
 
     // Get the stack walk text for this frame.
     Handle<String> frame_text;
@@ -8945,24 +8994,39 @@
 }
 
 
-// Change the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-// args[1]: boolean indicating on/off
+// Change the state of break on exceptions.
+// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
+// args[1]: Boolean indicating on/off.
 static Object* Runtime_ChangeBreakOnException(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);
-  ASSERT(args[0]->IsNumber());
-  ASSERT(args[1]->IsBoolean());
+  RUNTIME_ASSERT(args[0]->IsNumber());
+  CONVERT_BOOLEAN_CHECKED(enable, args[1]);
 
-  // Update break point state
+  // If the number doesn't match an enum value, the ChangeBreakOnException
+  // function will default to affecting caught exceptions.
   ExceptionBreakType type =
       static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
-  bool enable = args[1]->ToBoolean()->IsTrue();
+  // Update break point state.
   Debug::ChangeBreakOnException(type, enable);
   return Heap::undefined_value();
 }
 
 
+// Returns the state of break on exceptions.
+// args[0]: Enum value indicating caught or uncaught exceptions.
+static Object* Runtime_IsBreakOnException(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsNumber());
+
+  ExceptionBreakType type =
+      static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+  bool result = Debug::IsBreakOnException(type);
+  return Smi::FromInt(result);
+}
+
+
 // Prepare for stepping
 // args[0]: break id for checking execution state
 // args[1]: step action from the enumeration StepAction
@@ -9024,10 +9088,10 @@
   // Recursively copy the with contexts.
   Handle<Context> previous(context_chain->previous());
   Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
-  return Factory::NewWithContext(
-      CopyWithContextChain(function_context, previous),
-      extension,
-      context_chain->IsCatchContext());
+  Handle<Context> context = CopyWithContextChain(function_context, previous);
+  return Factory::NewWithContext(context,
+                                 extension,
+                                 context_chain->IsCatchContext());
 }
 
 
@@ -10064,6 +10128,7 @@
   inline_runtime_functions = false;
   RUNTIME_FUNCTION_LIST(ADD_ENTRY)
   inline_runtime_functions = true;
+  INLINE_FUNCTION_LIST(ADD_ENTRY)
   INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
 #undef ADD_ENTRY
   return *result;
@@ -10090,35 +10155,55 @@
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
-#define F(name, nargs, ressize)                                           \
-  { #name, FUNCTION_ADDR(Runtime_##name), nargs, \
-    static_cast<int>(Runtime::k##name), ressize },
+#define F(name, number_of_args, result_size)                             \
+  { Runtime::k##name, Runtime::RUNTIME, #name,   \
+    FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
 
-static Runtime::Function Runtime_functions[] = {
+
+#define I(name, number_of_args, result_size)                             \
+  { Runtime::kInline##name, Runtime::INLINE,     \
+    "_" #name, NULL, number_of_args, result_size },
+
+Runtime::Function kIntrinsicFunctions[] = {
   RUNTIME_FUNCTION_LIST(F)
-  { NULL, NULL, 0, -1, 0 }
+  INLINE_FUNCTION_LIST(I)
+  INLINE_RUNTIME_FUNCTION_LIST(I)
 };
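
Each list entry now expands twice: once through F/I above into a kIntrinsicFunctions initializer, and once in runtime.h into the FunctionId enum. The same X-macro technique, reduced to a self-contained demo (demo names only, not V8's):

#include <cstdio>

#define DEMO_FUNCTION_LIST(F) \
  F(NewObject, 1)             \
  F(IsSmi, 1)

enum DemoFunctionId {
#define F(name, nargs) kDemo##name,
  DEMO_FUNCTION_LIST(F)
#undef F
  kDemoNumFunctions
};

struct DemoFunction {
  DemoFunctionId id;
  const char* name;
  int nargs;
};

// Expand the same list again, this time into table initializers.
#define F(name, nargs) { kDemo##name, #name, nargs },
static const DemoFunction kDemoFunctions[] = {
  DEMO_FUNCTION_LIST(F)
};
#undef F

int main() {
  for (int i = 0; i < kDemoNumFunctions; ++i) {
    std::printf("%d: %s/%d\n", kDemoFunctions[i].id,
                kDemoFunctions[i].name, kDemoFunctions[i].nargs);
  }
  return 0;
}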
 
-#undef F
 
-
-Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
-  ASSERT(0 <= fid && fid < kNofFunctions);
-  return &Runtime_functions[fid];
+Object* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
+  ASSERT(dictionary != NULL);
+  ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
+  for (int i = 0; i < kNumFunctions; ++i) {
+    Object* name_symbol = Heap::LookupAsciiSymbol(kIntrinsicFunctions[i].name);
+    if (name_symbol->IsFailure()) return name_symbol;
+    StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
+    dictionary = string_dictionary->Add(String::cast(name_symbol),
+                                        Smi::FromInt(i),
+                                        PropertyDetails(NONE, NORMAL));
+    // Non-recoverable failure.  Calling code must restart heap initialization.
+    if (dictionary->IsFailure()) return dictionary;
+  }
+  return dictionary;
 }
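
What this builds is a name-to-index map over kIntrinsicFunctions, so FunctionForSymbol below is a single dictionary probe. Sketched here with std::unordered_map standing in for the GC-allocated StringDictionary (which is exactly the part that can fail and forces the retry contract above):

#include <string>
#include <unordered_map>

struct NamedEntry { const char* name; };

// Maps each intrinsic's name (e.g. "NewObject", "_IsSmi") to its index in
// the table. Unlike the StringDictionary, this cannot fail mid-build.
std::unordered_map<std::string, int> BuildNameTable(const NamedEntry* table,
                                                    int count) {
  std::unordered_map<std::string, int> names;
  for (int i = 0; i < count; ++i) {
    names.emplace(table[i].name, i);
  }
  return names;
}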
 
 
-Runtime::Function* Runtime::FunctionForName(Vector<const char> name) {
-  for (Function* f = Runtime_functions; f->name != NULL; f++) {
-    if (strncmp(f->name, name.start(), name.length()) == 0
-        && f->name[name.length()] == 0) {
-      return f;
-    }
+Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+  int entry = Heap::intrinsic_function_names()->FindEntry(*name);
+  if (entry != kNotFound) {
+    Object* smi_index = Heap::intrinsic_function_names()->ValueAt(entry);
+    int function_index = Smi::cast(smi_index)->value();
+    return &(kIntrinsicFunctions[function_index]);
   }
   return NULL;
 }
 
 
+Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
+  return &(kIntrinsicFunctions[static_cast<int>(id)]);
+}
+
+
 void Runtime::PerformGC(Object* result) {
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
diff --git a/src/runtime.h b/src/runtime.h
index 312907a..19f4144 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -263,6 +263,7 @@
   F(NewClosure, 2, 1) \
   F(NewObject, 1, 1) \
   F(NewObjectFromBound, 2, 1) \
+  F(FinalizeInstanceSize, 1, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
@@ -332,6 +333,7 @@
   F(SetScriptBreakPoint, 3, 1) \
   F(ClearBreakPoint, 1, 1) \
   F(ChangeBreakOnException, 2, 1) \
+  F(IsBreakOnException, 1, 1) \
   F(PrepareStep, 3, 1) \
   F(ClearStepping, 0, 1) \
   F(DebugEvaluate, 4, 1) \
@@ -389,6 +391,59 @@
   RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
 
 // ----------------------------------------------------------------------------
+// INLINE_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_FUNCTION_LIST(F) \
+  F(IsSmi, 1, 1)                                                             \
+  F(IsNonNegativeSmi, 1, 1)                                                  \
+  F(IsArray, 1, 1)                                                           \
+  F(IsRegExp, 1, 1)                                                          \
+  F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
+  F(ArgumentsLength, 0, 1)                                                   \
+  F(Arguments, 1, 1)                                                         \
+  F(ValueOf, 1, 1)                                                           \
+  F(SetValueOf, 2, 1)                                                        \
+  F(StringCharFromCode, 1, 1)                                                \
+  F(StringCharAt, 2, 1)                                                      \
+  F(ObjectEquals, 2, 1)                                                      \
+  F(RandomHeapNumber, 0, 1)                                                  \
+  F(IsObject, 1, 1)                                                          \
+  F(IsFunction, 1, 1)                                                        \
+  F(IsUndetectableObject, 1, 1)                                              \
+  F(IsSpecObject, 1, 1)                                                      \
+  F(IsStringWrapperSafeForDefaultValueOf, 1, 1)                              \
+  F(MathPow, 2, 1)                                                           \
+  F(MathSin, 1, 1)                                                           \
+  F(MathCos, 1, 1)                                                           \
+  F(MathSqrt, 1, 1)                                                          \
+  F(IsRegExpEquivalent, 2, 1)                                                \
+  F(HasCachedArrayIndex, 1, 1)                                               \
+  F(GetCachedArrayIndex, 1, 1)
+
+
+// ----------------------------------------------------------------------------
+// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also have
+// a corresponding runtime function that is called for slow cases.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+  F(IsConstructCall, 0, 1)                                                   \
+  F(ClassOf, 1, 1)                                                           \
+  F(StringCharCodeAt, 2, 1)                                                  \
+  F(Log, 3, 1)                                                               \
+  F(StringAdd, 2, 1)                                                         \
+  F(SubString, 3, 1)                                                         \
+  F(StringCompare, 2, 1)                                                     \
+  F(RegExpExec, 4, 1)                                                        \
+  F(RegExpConstructResult, 3, 1)                                             \
+  F(RegExpCloneResult, 1, 1)                                                 \
+  F(GetFromCache, 2, 1)                                                      \
+  F(NumberToString, 1, 1)                                                    \
+  F(SwapElements, 3, 1)
+
+
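One consequence of the two lists, sketched against the Runtime::Function struct declared further down this header (a reading aid, not part of the patch): entries generated by the I() macro in runtime.cc carry a NULL entry point, so the code generator has to open-code them, while RUNTIME entries always have a callable C++ function.

static bool MustBeOpenCoded(const Runtime::Function* f) {
  // I() fills in NULL for `entry` and Runtime::INLINE for the type;
  // F() always supplies FUNCTION_ADDR(Runtime_<name>).
  return f->intrinsic_type == Runtime::INLINE && f->entry == NULL;
}
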
+//---------------------------------------------------------------------------
 // Runtime provides access to all C++ runtime functions.
 
 class Runtime : public AllStatic {
@@ -396,33 +451,52 @@
   enum FunctionId {
 #define F(name, nargs, ressize) k##name,
     RUNTIME_FUNCTION_LIST(F)
-    kNofFunctions
 #undef F
+#define F(name, nargs, ressize) kInline##name,
+    INLINE_FUNCTION_LIST(F)
+    INLINE_RUNTIME_FUNCTION_LIST(F)
+#undef F
+    kNumFunctions,
+    kFirstInlineFunction = kInlineIsSmi
   };
 
-  // Runtime function descriptor.
+  enum IntrinsicType {
+    RUNTIME,
+    INLINE
+  };
+
+  // Intrinsic function descriptor.
   struct Function {
+    FunctionId function_id;
+    IntrinsicType intrinsic_type;
     // The JS name of the function.
     const char* name;
 
-    // The C++ (native) entry point.
+    // The C++ (native) entry point.  NULL if the function is inlined.
     byte* entry;
 
-    // The number of arguments expected; nargs < 0 if variable no. of
-    // arguments.
+    // The number of arguments expected. nargs is -1 if the function takes
+    // a variable number of arguments.
     int nargs;
-    int stub_id;
-    // Size of result, if complex (larger than a single pointer),
-    // otherwise zero.
+    // Size of result.  Most functions return a single pointer, size 1.
     int result_size;
   };
 
-  // Get the runtime function with the given function id.
-  static Function* FunctionForId(FunctionId fid);
+  static const int kNotFound = -1;
 
-  // Get the runtime function with the given name.
-  static Function* FunctionForName(Vector<const char> name);
+  // Add symbols for all the intrinsic function names to a StringDictionary.
+  // Returns failure if an allocation fails.  In this case, it must be
+  // retried with a new, empty StringDictionary, not with the same one.
+  // Alternatively, heap initialization can be completely restarted.
+  static Object* InitializeIntrinsicFunctionNames(Object* dictionary);
 
+  // Get the intrinsic function with the given name, which must be a symbol.
+  static Function* FunctionForSymbol(Handle<String> name);
+
+  // Get the intrinsic function with the given FunctionId.
+  static Function* FunctionForId(FunctionId id);
+
+  // General-purpose helper functions for the runtime system.
   static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
 
   static bool IsUpperCaseChar(uint16_t ch);
diff --git a/src/scanner.cc b/src/scanner.cc
index 15b1d44..79d63f1 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -342,8 +342,11 @@
 // ----------------------------------------------------------------------------
 // Scanner
 
-Scanner::Scanner(ParserMode pre)
-    : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
+Scanner::Scanner()
+    : has_line_terminator_before_next_(false),
+      is_parsing_json_(false),
+      source_(NULL),
+      stack_overflow_(false) {}
 
 
 void Scanner::Initialize(Handle<String> source,
diff --git a/src/scanner.h b/src/scanner.h
index 8d61846..6e5333b 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -281,8 +281,7 @@
     bool complete_;
   };
 
-  // Construction
-  explicit Scanner(ParserMode parse_mode);
+  Scanner();
 
   // Initialize the Scanner to scan source.
   void Initialize(Handle<String> source,
@@ -298,7 +297,7 @@
   Token::Value Next();
 
   // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const  { return next_.token; }
+  Token::Value peek() const { return next_.token; }
 
   // Returns true if there was a line terminator before the peek'ed token.
   bool has_line_terminator_before_next() const {
@@ -314,8 +313,8 @@
 
   // Returns the location information for the current token
   // (the token returned by Next()).
-  Location location() const  { return current_.location; }
-  Location peek_location() const  { return next_.location; }
+  Location location() const { return current_.location; }
+  Location peek_location() const { return next_.location; }
 
   // Returns the literal string, if any, for the current token (the
   // token returned by Next()). The string is 0-terminated and in
@@ -488,7 +487,6 @@
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
   bool has_line_terminator_before_next_;
-  bool is_pre_parsing_;
   bool is_parsing_json_;
 
   // Different UTF16 buffers used to pull characters from. Based on input one of
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 7e7f152..e054d7d 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -37,8 +37,8 @@
 
 
 static int CompareLocal(Variable* const* v, Variable* const* w) {
-  Slot* s = (*v)->slot();
-  Slot* t = (*w)->slot();
+  Slot* s = (*v)->AsSlot();
+  Slot* t = (*w)->AsSlot();
   // We may have rewritten parameters (that are in the arguments object)
   // and which may have a NULL slot... - find a better solution...
   int x = (s != NULL ? s->index() : 0);
@@ -83,7 +83,7 @@
   for (int i = 0; i < locals.length(); i++) {
     Variable* var = locals[i];
     if (var->is_used()) {
-      Slot* slot = var->slot();
+      Slot* slot = var->AsSlot();
       if (slot != NULL) {
         switch (slot->type()) {
           case Slot::PARAMETER:
@@ -112,9 +112,9 @@
   if (scope->num_heap_slots() > 0) {
     // Add user-defined slots.
     for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_modes_.length());
       context_slots_.Add(heap_locals[i]->name());
       context_modes_.Add(heap_locals[i]->mode());
@@ -131,15 +131,15 @@
     Variable* var = scope->function();
     if (var != NULL &&
         var->is_used() &&
-        var->slot()->type() == Slot::CONTEXT) {
+        var->AsSlot()->type() == Slot::CONTEXT) {
       function_name_ = var->name();
       // Note that we must not find the function name in the context slot
       // list - instead it must be handled separately in the
       // Contexts::Lookup() function. Thus record an empty symbol here so we
       // get the correct number of context slots.
-      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_modes_.length());
       context_slots_.Add(Factory::empty_symbol());
       context_modes_.Add(Variable::INTERNAL);
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 0fdab56..b210ae7 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -63,13 +63,13 @@
   // --------------------------------------------------------------------------
   // Lookup
 
-  Handle<String> function_name() const  { return function_name_; }
+  Handle<String> function_name() const { return function_name_; }
 
-  Handle<String> parameter_name(int i) const  { return parameters_[i]; }
-  int number_of_parameters() const  { return parameters_.length(); }
+  Handle<String> parameter_name(int i) const { return parameters_[i]; }
+  int number_of_parameters() const { return parameters_.length(); }
 
-  Handle<String> stack_slot_name(int i) const  { return stack_slots_[i]; }
-  int number_of_stack_slots() const  { return stack_slots_.length(); }
+  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
+  int number_of_stack_slots() const { return stack_slots_.length(); }
 
   Handle<String> context_slot_name(int i) const {
     return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
diff --git a/src/scopes.cc b/src/scopes.cc
index b55e5d5..c4436fe 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -810,8 +810,7 @@
 
    // We are using 'arguments'. Tell the code generator that it needs to
     // allocate the arguments object by setting 'arguments_'.
-    arguments_ = new VariableProxy(Factory::arguments_symbol(), false, false);
-    arguments_->BindTo(arguments);
+    arguments_ = arguments;
 
     // We also need the '.arguments' shadow variable. Declare it and create
     // and bind the corresponding proxy. It's ok to declare it only now
@@ -822,13 +821,13 @@
     // NewTemporary() because the mode needs to be INTERNAL since this
     // variable may be allocated in the heap-allocated context (temporaries
     // are never allocated in the context).
-    Variable* arguments_shadow =
-        new Variable(this, Factory::arguments_shadow_symbol(),
-                     Variable::INTERNAL, true, Variable::ARGUMENTS);
-    arguments_shadow_ =
-        new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
-    arguments_shadow_->BindTo(arguments_shadow);
-    temps_.Add(arguments_shadow);
+    arguments_shadow_ = new Variable(this,
+                                     Factory::arguments_shadow_symbol(),
+                                     Variable::INTERNAL,
+                                     true,
+                                     Variable::ARGUMENTS);
+    arguments_shadow_->set_is_used(true);
+    temps_.Add(arguments_shadow_);
 
     // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
     for (int i = 0; i < params_.length(); i++) {
@@ -839,14 +838,13 @@
           // It is ok to set this only now, because arguments is a local
           // variable that is allocated after the parameters have been
           // allocated.
-          arguments_shadow->is_accessed_from_inner_scope_ = true;
+          arguments_shadow_->is_accessed_from_inner_scope_ = true;
         }
         var->rewrite_ =
-          new Property(arguments_shadow_,
-                       new Literal(Handle<Object>(Smi::FromInt(i))),
-                       RelocInfo::kNoPosition,
-                       Property::SYNTHETIC);
-        if (var->is_used()) arguments_shadow->set_is_used(true);
+            new Property(new VariableProxy(arguments_shadow_),
+                         new Literal(Handle<Object>(Smi::FromInt(i))),
+                         RelocInfo::kNoPosition,
+                         Property::SYNTHETIC);
       }
     }
 
@@ -862,7 +860,8 @@
       if (MustAllocate(var)) {
         if (MustAllocateInContext(var)) {
           ASSERT(var->rewrite_ == NULL ||
-                 (var->slot() != NULL && var->slot()->type() == Slot::CONTEXT));
+                 (var->AsSlot() != NULL &&
+                  var->AsSlot()->type() == Slot::CONTEXT));
           if (var->rewrite_ == NULL) {
             // Only set the heap allocation if the parameter has not
             // been allocated yet.
@@ -870,8 +869,8 @@
           }
         } else {
           ASSERT(var->rewrite_ == NULL ||
-                 (var->slot() != NULL &&
-                  var->slot()->type() == Slot::PARAMETER));
+                 (var->AsSlot() != NULL &&
+                  var->AsSlot()->type() == Slot::PARAMETER));
           // Set the parameter index always, even if the parameter
           // was seen before! (We need to access the actual parameter
           // supplied for the last occurrence of a multiply declared
@@ -888,7 +887,7 @@
   ASSERT(var->scope() == this);
   ASSERT(var->rewrite_ == NULL ||
          (!var->IsVariable(Factory::result_symbol())) ||
-         (var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
+         (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
   if (var->rewrite_ == NULL && MustAllocate(var)) {
     if (MustAllocateInContext(var)) {
       AllocateHeapSlot(var);
diff --git a/src/scopes.h b/src/scopes.h
index c2354b2..68cf5e5 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -187,21 +187,21 @@
   // Predicates.
 
   // Specific scope types.
-  bool is_eval_scope() const  { return type_ == EVAL_SCOPE; }
-  bool is_function_scope() const  { return type_ == FUNCTION_SCOPE; }
-  bool is_global_scope() const  { return type_ == GLOBAL_SCOPE; }
+  bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
+  bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
+  bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
 
   // Information about which scopes calls eval.
-  bool calls_eval() const  { return scope_calls_eval_; }
-  bool outer_scope_calls_eval() const  { return outer_scope_calls_eval_; }
+  bool calls_eval() const { return scope_calls_eval_; }
+  bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
 
   // Is this scope inside a with statement.
-  bool inside_with() const  { return scope_inside_with_; }
+  bool inside_with() const { return scope_inside_with_; }
   // Does this scope contain a with statement.
-  bool contains_with() const  { return scope_contains_with_; }
+  bool contains_with() const { return scope_contains_with_; }
 
   // The scope immediately surrounding this scope, or NULL.
-  Scope* outer_scope() const  { return outer_scope_; }
+  Scope* outer_scope() const { return outer_scope_; }
 
   // ---------------------------------------------------------------------------
   // Accessors.
@@ -217,27 +217,27 @@
   // The variable holding the function literal for named function
   // literals, or NULL.
   // Only valid for function scopes.
-  Variable* function() const  {
+  Variable* function() const {
     ASSERT(is_function_scope());
     return function_;
   }
 
   // Parameters. The left-most parameter has index 0.
   // Only valid for function scopes.
-  Variable* parameter(int index) const  {
+  Variable* parameter(int index) const {
     ASSERT(is_function_scope());
     return params_[index];
   }
 
-  int num_parameters() const  { return params_.length(); }
+  int num_parameters() const { return params_.length(); }
 
   // The local variable 'arguments' if we need to allocate it; NULL otherwise.
  // If arguments() exists, arguments_shadow() exists, too.
-  VariableProxy* arguments()  const  { return arguments_; }
+  Variable* arguments() const { return arguments_; }
 
   // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
  // If arguments_shadow() exists, arguments() exists, too.
-  VariableProxy* arguments_shadow()  const  { return arguments_shadow_; }
+  Variable* arguments_shadow() const { return arguments_shadow_; }
 
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
@@ -262,8 +262,8 @@
   void AllocateVariables(Handle<Context> context);
 
   // Result of variable allocation.
-  int num_stack_slots() const  { return num_stack_slots_; }
-  int num_heap_slots() const  { return num_heap_slots_; }
+  int num_stack_slots() const { return num_stack_slots_; }
+  int num_heap_slots() const { return num_heap_slots_; }
 
   // Make sure this scope and all outer scopes are eagerly compiled.
   void ForceEagerCompilation()  { force_eager_compilation_ = true; }
@@ -322,9 +322,9 @@
   // Function variable, if any; function scopes only.
   Variable* function_;
   // Convenience variable; function scopes only.
-  VariableProxy* arguments_;
+  Variable* arguments_;
   // Convenience variable; function scopes only.
-  VariableProxy* arguments_shadow_;
+  Variable* arguments_shadow_;
 
   // Illegal redeclaration.
   Expression* illegal_redecl_;
diff --git a/src/spaces.h b/src/spaces.h
index 9ffa940..94e0cd2 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -243,8 +243,10 @@
   static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
     kIntSize + kPointerSize;
 
-  // The start offset of the object area in a page.
-  static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.
+  static const int kObjectStartOffset =
+      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
 
   // Object area size in bytes.
   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
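
A sketch of the nested alignment, with illustrative constants (not V8's actual values): both macros round up to a power of two, and rounding up to the two alignments in sequence yields an offset that satisfies both, since alignment to one power of two is preserved by alignment to any other.

#include <cstdint>

constexpr intptr_t AlignUp(intptr_t x, intptr_t a) {  // a: power of two
  return (x + a - 1) & ~(a - 1);
}

constexpr intptr_t kMapAlign = 8;    // assumed MAP_POINTER_ALIGN granularity
constexpr intptr_t kCodeAlign = 32;  // assumed CODE_POINTER_ALIGN granularity
constexpr intptr_t kHeader = 28;     // stand-in for kPageHeaderSize

constexpr intptr_t kStart = AlignUp(AlignUp(kHeader, kMapAlign), kCodeAlign);
static_assert(kStart % kMapAlign == 0 && kStart % kCodeAlign == 0,
              "offset is suitable for both maps and code");
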
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 34989d3..6b41577 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1186,25 +1186,43 @@
 
 Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }
 
 
 Object* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(
+        CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }
 
 
 Object* StoreStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }
 
 
 Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(
+        CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }
 
 
@@ -1227,7 +1245,7 @@
                                             String* fname) {
   ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
   switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name) \
+#define CALL_GENERATOR_CASE(ignored1, ignored2, name)           \
     case k##name##CallGenerator:                                \
       return CallStubCompiler::Compile##name##Call(object,      \
                                                    holder,      \
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 388bb52..c47cab7 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -370,13 +370,15 @@
                                                   Register prototype);
 
   // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called.  This ties the
-  // generated code to a particular context and so must not be used in
-  // cases where the generated code is not allowed to have references
-  // to objects from a context.
+  // context we were in when this function was called. If the context
+  // has changed, a jump to miss is performed. This ties the generated
+  // code to a particular context and so must not be used in cases
+  // where the generated code is not allowed to have references to
+  // objects from a context.
   static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                         int index,
-                                                        Register prototype);
+                                                        Register prototype,
+                                                        Label* miss);
 
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
                                        Register dst, Register src,
@@ -612,29 +614,25 @@
 // Installation of custom call generators for the selected builtins is
 // handled by the bootstrapper.
 //
-// Each entry has a name of a global function (lowercased), a flag
-// controlling whether the generator is set on the function itself or
-// on its instance prototype, a name of a builtin function on the
-// function or its instance prototype (the one the generator is set
-// for), and a name of a generator itself (used to build ids and
-// generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V)                          \
-  V(array, INSTANCE_PROTOTYPE, push, ArrayPush)               \
-  V(array, INSTANCE_PROTOTYPE, pop, ArrayPop)                 \
-  V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
-  V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt)         \
-  V(string, FUNCTION, fromCharCode, StringFromCharCode)
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype" (this controls whether the
+// generator is set on the object itself or, in case it's a function,
+// on the its instance prototype), a name of a builtin function on the
+// object (the one the generator is set for), and a name of the
+// generator (used to build ids and generator function names).
+#define CUSTOM_CALL_IC_GENERATORS(V)                \
+  V(Array.prototype, push, ArrayPush)               \
+  V(Array.prototype, pop, ArrayPop)                 \
+  V(String.prototype, charCodeAt, StringCharCodeAt) \
+  V(String.prototype, charAt, StringCharAt)         \
+  V(String, fromCharCode, StringFromCharCode)       \
+  V(Math, floor, MathFloor)
 
 
 class CallStubCompiler: public StubCompiler {
  public:
-  enum CustomGeneratorOwner {
-    FUNCTION,
-    INSTANCE_PROTOTYPE
-  };
-
   enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
     k##name##CallGenerator,
     CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
 #undef DECLARE_CALL_GENERATOR_ID
@@ -673,11 +671,11 @@
                             JSFunction* function,
                             String* name);
 
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
-  Object* Compile##name##Call(Object* object,                      \
-                              JSObject* holder,                    \
-                              JSGlobalPropertyCell* cell,          \
-                              JSFunction* function,                \
+#define DECLARE_CALL_GENERATOR(ignored1, ignored2,  name) \
+  Object* Compile##name##Call(Object* object,             \
+                              JSObject* holder,           \
+                              JSGlobalPropertyCell* cell, \
+                              JSFunction* function,       \
                               String* fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
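
Worked expansion of the new V(Math, floor, MathFloor) entry through the two macros above, reconstructed by hand as a reading aid:

// From DECLARE_CALL_GENERATOR_ID, inside the anonymous enum:
kMathFloorCallGenerator,

// From DECLARE_CALL_GENERATOR, inside CallStubCompiler:
Object* CompileMathFloorCall(Object* object,
                             JSObject* holder,
                             JSGlobalPropertyCell* cell,
                             JSFunction* function,
                             String* fname);
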
diff --git a/src/unicode.h b/src/unicode.h
index a3b799e..9d1d683 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -120,6 +120,9 @@
   static inline unsigned Encode(char* out, uchar c);
   static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
       unsigned capacity, unsigned* chars_read, unsigned* offset);
+  static uchar CalculateValue(const byte* str,
+                              unsigned length,
+                              unsigned* cursor);
   static const uchar kBadChar = 0xFFFD;
   static const unsigned kMaxEncodedSize   = 4;
   static const unsigned kMaxOneByteChar   = 0x7f;
@@ -133,9 +136,6 @@
   static inline uchar ValueOf(const byte* str,
                               unsigned length,
                               unsigned* cursor);
-  static uchar CalculateValue(const byte* str,
-                              unsigned length,
-                              unsigned* cursor);
 };
 
 // --- C h a r a c t e r   S t r e a m ---
diff --git a/src/utils.h b/src/utils.h
index d605891..fefbfe9 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -391,6 +391,12 @@
   // Factory method for creating empty vectors.
   static Vector<T> empty() { return Vector<T>(NULL, 0); }
 
+  template<typename S>
+  static Vector<T> cast(Vector<S> input) {
+    return Vector<T>(reinterpret_cast<T*>(input.start()),
+                     input.length() * sizeof(S) / sizeof(T));
+  }
+
  protected:
   void set_start(T* start) { start_ = start; }
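
A usage sketch for the new cast, assuming Vector's (T*, int) constructor; the caller remains responsible for alignment and for the byte length dividing evenly:

#include <stdint.h>

void CastExample() {
  uint8_t raw[8] = { 0 };
  Vector<uint8_t> bytes(raw, 8);
  // The element count is rescaled by sizeof(S) / sizeof(T):
  // 8 bytes become 4 uint16_t code units.
  Vector<uint16_t> units = Vector<uint16_t>::cast(bytes);
}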
 
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 8c948cc..a8eb9d2 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -161,6 +161,8 @@
   SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                  \
   SC(named_load_global_inline, V8.NamedLoadGlobalInline)              \
   SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)     \
+  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
+  SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)         \
   SC(keyed_store_field, V8.KeyedStoreField)                           \
   SC(keyed_store_inline, V8.KeyedStoreInline)                         \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
diff --git a/src/variables.cc b/src/variables.cc
index f46a54d..504e224 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -70,24 +70,19 @@
 }
 
 
-Property* Variable::AsProperty() {
+Property* Variable::AsProperty() const {
   return rewrite_ == NULL ? NULL : rewrite_->AsProperty();
 }
 
 
-Variable* Variable::AsVariable()  {
-  return rewrite_ == NULL || rewrite_->AsSlot() != NULL ? this : NULL;
-}
-
-
-Slot* Variable::slot() const {
-  return rewrite_ != NULL ? rewrite_->AsSlot() : NULL;
+Slot* Variable::AsSlot() const {
+  return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
 }
 
 
 bool Variable::IsStackAllocated() const {
-  Slot* s = slot();
-  return s != NULL && s->IsStackAllocated();
+  Slot* slot = AsSlot();
+  return slot != NULL && slot->IsStackAllocated();
 }
 
 
diff --git a/src/variables.h b/src/variables.h
index 618f6ac..ec76fee 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -122,19 +122,20 @@
   static const char* Mode2String(Mode mode);
 
   // Type testing & conversion
-  Property* AsProperty();
-  Variable* AsVariable();
+  Property* AsProperty() const;
+  Slot* AsSlot() const;
+
   bool IsValidLeftHandSide() { return is_valid_LHS_; }
 
   // The source code for an eval() call may refer to a variable that is
   // in an outer scope about which we don't know anything (it may not
   // be the global scope). scope() is NULL in that case. Currently the
   // scope is only used to follow the context chain length.
-  Scope* scope() const  { return scope_; }
+  Scope* scope() const { return scope_; }
 
-  Handle<String> name() const  { return name_; }
-  Mode mode() const  { return mode_; }
-  bool is_accessed_from_inner_scope() const  {
+  Handle<String> name() const { return name_; }
+  Mode mode() const { return mode_; }
+  bool is_accessed_from_inner_scope() const {
     return is_accessed_from_inner_scope_;
   }
   bool is_used() { return is_used_; }
@@ -171,8 +172,7 @@
     local_if_not_shadowed_ = local;
   }
 
-  Expression* rewrite() const  { return rewrite_; }
-  Slot* slot() const;
+  Expression* rewrite() const { return rewrite_; }
 
   StaticType* type() { return &type_; }
 
diff --git a/src/version.cc b/src/version.cc
index f105cbd..5be1e82 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,9 +34,9 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     4
-#define BUILD_NUMBER      3
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
-#define CANDIDATE_VERSION false
+#define CANDIDATE_VERSION true
 
 // Define SONAME to have the SCons build put a specific SONAME into the
 // shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9318bb8..6e29b7a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -418,6 +418,20 @@
 }
 
 
+void Assembler::bind(NearLabel* L) {
+  ASSERT(!L->is_bound());
+  last_pc_ = NULL;
+  while (L->unresolved_branches_ > 0) {
+    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
+    int disp = pc_offset() - branch_pos;
+    ASSERT(is_int8(disp));
+    set_byte_at(branch_pos - sizeof(int8_t), disp);
+    L->unresolved_branches_--;
+  }
+  L->bind_to(pc_offset());
+}
+
+
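The link/bind protocol these functions implement, modeled in a few lines (illustrative structure, not the real Assembler or NearLabel): the displacement byte is always the last byte of the branch, link_to() records the offset just past it, and bind() patches each recorded site.

#include <cassert>
#include <stdint.h>

struct MiniAssembler {
  uint8_t buffer[64];
  int pc;

  MiniAssembler() : pc(0) {}
  void emit(uint8_t b) { buffer[pc++] = b; }

  void jmp_forward(int* link) {
    emit(0xEB);   // jmp rel8 opcode
    emit(0x00);   // placeholder displacement, resolved at bind()
    *link = pc;   // remember the position just past the branch
  }

  void bind(int link) {
    int disp = pc - link;  // rel8 is relative to the end of the branch
    assert(-128 <= disp && disp <= 127);
    buffer[link - 1] = static_cast<uint8_t>(disp);
  }
};
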
 void Assembler::GrowBuffer() {
   ASSERT(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
@@ -1227,6 +1241,27 @@
 }
 
 
+void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 0111 tttn #8-bit disp
+    emit(0x70 | cc);
+    emit((offs - short_size) & 0xFF);
+  } else {
+    emit(0x70 | cc);
+    emit(0x00);      // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
+
+
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1269,6 +1304,25 @@
 }
 
 
+void Assembler::jmp(NearLabel* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;  // Opcode byte plus displacement byte, as in
+                               // j() above.
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 1110 1011 #8-bit disp.
+    emit(0xEB);
+    emit((offs - short_size) & 0xFF);
+  } else {
+    emit(0xEB);
+    emit(0x00);      // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
+
+
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7082af7..bbc1010 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -92,13 +92,13 @@
     Register r = { code };
     return r;
   }
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(Register reg) const { return code_ == reg.code_; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
-  int bit() const  {
+  int bit() const {
     return 1 << code_;
   }
 
@@ -138,8 +138,8 @@
 
 
 struct XMMRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  int code() const  {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  int code() const {
     ASSERT(is_valid());
     return code_;
   }
@@ -1005,6 +1005,7 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(NearLabel* L);
 
   // Calls
   // Call near relative 32-bit displacement, relative to next instruction.
@@ -1029,10 +1030,16 @@
   // Jump near absolute indirect (m64)
   void jmp(const Operand& src);
 
+  // Short jump
+  void jmp(NearLabel* L);
+
   // Conditional jumps
   void j(Condition cc, Label* L);
   void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
+  // Conditional short jump
+  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+
   // Floating-point operations
   void fld(int i);
 
@@ -1171,9 +1178,9 @@
   void RecordStatementPosition(int pos);
   bool WriteRecordedPositions();
 
-  int pc_offset() const  { return static_cast<int>(pc_ - buffer_); }
+  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
   int current_statement_position() const { return current_statement_position_; }
-  int current_position() const  { return current_position_; }
+  int current_position() const { return current_position_; }
 
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
@@ -1196,6 +1203,7 @@
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
   byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
@@ -1371,7 +1379,6 @@
   // labels
   // void print(Label* L);
   void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
 
   // record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 85ad637..08c19ba 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -913,7 +913,11 @@
 
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function) {
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
 
@@ -958,6 +962,26 @@
     __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
     __ j(equal, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+      __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
+      __ j(not_zero, &allocate);
+
+      __ push(rax);
+      __ push(rdi);
+
+      __ push(rdi);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(rdi);
+      __ pop(rax);
+
+      __ bind(&allocate);
+    }
+
     // Now allocate the JSObject on the heap.
     __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
     __ shl(rdi, Immediate(kPointerSizeLog2));
@@ -981,7 +1005,12 @@
     // rbx: JSObject
     // rdi: start of next object
     { Label loop, entry;
-      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      // Pre-fill with one-pointer filler maps so that the unused tail of
+      // the object can be truncated when slack tracking completes.
+      if (count_constructions) {
+        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      }
       __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
       __ jmp(&entry);
       __ bind(&loop);
@@ -1164,13 +1193,18 @@
 }
 
 
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true);
+}
+
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false);
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true);
+  Generate_JSConstructStubHelper(masm, true, false);
 }
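
A C-level sketch of what the countdown stub above does (hypothetical types): every construction decrements the per-SharedFunctionInfo budget, and when it reaches zero the runtime finalizes the instance size exactly once and installs a non-counting stub.

#include <stdint.h>

struct SharedInfoSketch {
  uint8_t construction_count;  // the byte the decb above targets
};

void FinalizeInstanceSize(SharedInfoSketch* shared);  // runtime call

void OnConstruct(SharedInfoSketch* shared) {
  if (--shared->construction_count == 0) {
    // The stub preserves rax (initial map) and rdi (constructor)
    // around this call.
    FinalizeInstanceSize(shared);
  }
  // ... proceed to allocate the JSObject ...
}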
 
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index c75b945..9d82e0e 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -203,7 +203,7 @@
 
 
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  Label false_result, true_result, not_string;
+  NearLabel false_result, true_result, not_string;
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
   // 'null' => false.
@@ -989,7 +989,7 @@
   Label runtime_call;
   Label runtime_call_clear_stack;
   Label input_not_smi;
-  Label loaded;
+  NearLabel loaded;
   // Test that rax is a number.
   __ movq(rax, Operand(rsp, kPointerSize));
   __ JumpIfNotSmi(rax, &input_not_smi);
@@ -1069,7 +1069,7 @@
   __ addl(rcx, rcx);
   __ lea(rcx, Operand(rax, rcx, times_8, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  Label cache_miss;
+  NearLabel cache_miss;
   __ cmpq(rbx, Operand(rcx, 0));
   __ j(not_equal, &cache_miss);
   // Cache hit!
@@ -1160,7 +1160,7 @@
 
   // Compute st(0) % st(1)
   {
-    Label partial_remainder_loop;
+    NearLabel partial_remainder_loop;
     __ bind(&partial_remainder_loop);
     __ fprem1();
     __ fwait();
@@ -1202,7 +1202,7 @@
   // cvttsd2si (32-bit version) directly.
   Register double_exponent = rbx;
   Register double_value = rdi;
-  Label done, exponent_63_plus;
+  NearLabel done, exponent_63_plus;
   // Get double and extract exponent.
   __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
   // Clear result preemptively, in case we need to return zero.
@@ -1404,33 +1404,35 @@
   Label slow, done;
 
   if (op_ == Token::SUB) {
-    // Check whether the value is a smi.
-    Label try_float;
-    __ JumpIfNotSmi(rax, &try_float);
+    if (include_smi_code_) {
+      // Check whether the value is a smi.
+      Label try_float;
+      __ JumpIfNotSmi(rax, &try_float);
+      if (negative_zero_ == kIgnoreNegativeZero) {
+        __ SmiCompare(rax, Smi::FromInt(0));
+        __ j(equal, &done);
+      }
+      __ SmiNeg(rax, rax, &done);
 
-    if (negative_zero_ == kIgnoreNegativeZero) {
-      __ SmiCompare(rax, Smi::FromInt(0));
-      __ j(equal, &done);
+      // Either zero or Smi::kMinValue, neither of which become a smi when
+      // negated. We handle negative zero here if required. We always enter
+      // the runtime system if we have Smi::kMinValue.
+      if (negative_zero_ == kStrictNegativeZero) {
+        __ SmiCompare(rax, Smi::FromInt(0));
+        __ j(not_equal, &slow);
+        __ Move(rax, Factory::minus_zero_value());
+        __ jmp(&done);
+      } else {
+        __ SmiCompare(rax, Smi::FromInt(Smi::kMinValue));
+        __ j(equal, &slow);
+        __ jmp(&done);
+      }
+      // Try floating point case.
+      __ bind(&try_float);
+    } else if (FLAG_debug_code) {
+      __ AbortIfSmi(rax);
     }
 
-    // Enter runtime system if the value of the smi is zero
-    // to make sure that we switch between 0 and -0.
-    // Also enter it if the value of the smi is Smi::kMinValue.
-    __ SmiNeg(rax, rax, &done);
-
-    // Either zero or Smi::kMinValue, neither of which become a smi when
-    // negated.
-    if (negative_zero_ == kStrictNegativeZero) {
-      __ SmiCompare(rax, Smi::FromInt(0));
-      __ j(not_equal, &slow);
-      __ Move(rax, Factory::minus_zero_value());
-      __ jmp(&done);
-    } else  {
-      __ jmp(&slow);
-    }
-
-    // Try floating point case.
-    __ bind(&try_float);
     __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
     __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &slow);
@@ -1449,6 +1451,17 @@
       __ movq(rax, rcx);
     }
   } else if (op_ == Token::BIT_NOT) {
+    if (include_smi_code_) {
+      Label try_float;
+      __ JumpIfNotSmi(rax, &try_float);
+      __ SmiNot(rax, rax);
+      __ jmp(&done);
+      // Try floating point case.
+      __ bind(&try_float);
+    } else if (FLAG_debug_code) {
+      __ AbortIfSmi(rax);
+    }
+
     // Check if the operand is a heap number.
     __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
     __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
@@ -1758,7 +1771,7 @@
 
   // rcx: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
-  Label seq_ascii_string, seq_two_byte_string, check_code;
+  NearLabel seq_ascii_string, seq_two_byte_string, check_code;
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -1883,7 +1896,7 @@
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  Label setup_two_byte, setup_rest;
+  NearLabel setup_two_byte, setup_rest;
   __ testb(rdi, rdi);
   __ j(zero, &setup_two_byte);
   __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
@@ -1910,10 +1923,10 @@
   __ pop(rsi);
 
   // Check the result.
-  Label success;
+  NearLabel success;
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
   __ j(equal, &success);
-  Label failure;
+  NearLabel failure;
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
   __ j(equal, &failure);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
@@ -1968,7 +1981,7 @@
   // rbx: last_match_info backing store (FixedArray)
   // rcx: offsets vector
   // rdx: number of capture registers
-  Label next_capture, done;
+  NearLabel next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
@@ -1976,7 +1989,7 @@
   __ j(negative, &done);
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
-  __ Integer32ToSmi(rdi, rdi, &runtime);
+  __ Integer32ToSmi(rdi, rdi);
   // Store the smi value in the last match info.
   __ movq(FieldOperand(rbx,
                        rdx,
@@ -2115,6 +2128,26 @@
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
   Label check_unequal_objects, done;
+
+  // Compare two smis if required.
+  if (include_smi_compare_) {
+    Label non_smi, smi_done;
+    __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+    __ subq(rdx, rax);
+    __ j(no_overflow, &smi_done);
+    __ neg(rdx);  // Correct sign in case of overflow.
+    __ bind(&smi_done);
+    __ movq(rax, rdx);
+    __ ret(0);
+    __ bind(&non_smi);
+  } else if (FLAG_debug_code) {
+    Label ok;
+    __ JumpIfNotSmi(rdx, &ok);
+    __ JumpIfNotSmi(rax, &ok);
+    __ Abort("CompareStub: smi operands");
+    __ bind(&ok);
+  }
+
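The smi fast case modeled with int64_t standing in for the registers (two's complement assumed): subq leaves the wrapped difference, and when the signed subtraction overflows the wrapped value's sign is inverted, which neg corrects. Tagged smi differences can never wrap to exactly INT64_MIN, so the stub's neg is always safe.

#include <stdint.h>

int64_t SmiCompareSketch(int64_t rdx, int64_t rax) {
  uint64_t wrapped = static_cast<uint64_t>(rdx) - static_cast<uint64_t>(rax);
  int64_t result = static_cast<int64_t>(wrapped);
  // x - y overflows iff the operands have opposite signs and the result's
  // sign differs from x's.
  bool overflow = ((rdx < 0) != (rax < 0)) && ((result < 0) != (rdx < 0));
  if (overflow) result = static_cast<int64_t>(-wrapped);  // the stub's neg
  return result;  // negative, zero or positive, like the stub's rax
}
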
   // The compare stub returns a positive, negative, or zero 64-bit integer
   // value in rax, corresponding to result of comparing the two inputs.
   // NOTICE! This code is only reached after a smi-fast-case check, so
@@ -2122,14 +2155,14 @@
 
   // Two identical objects are equal unless they are both NaN or undefined.
   {
-    Label not_identical;
+    NearLabel not_identical;
     __ cmpq(rax, rdx);
     __ j(not_equal, &not_identical);
 
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      Label check_for_nan;
+      NearLabel check_for_nan;
       __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
       __ j(not_equal, &check_for_nan);
       __ Set(rax, NegativeComparisonResult(cc_));
@@ -2147,7 +2180,7 @@
       __ Set(rax, EQUAL);
       __ ret(0);
     } else {
-      Label heap_number;
+      NearLabel heap_number;
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
              Factory::heap_number_map());
@@ -2211,7 +2244,7 @@
 
       // If the first object is a JS object, we have done pointer comparison.
       STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      Label first_non_object;
+      NearLabel first_non_object;
       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
       __ j(below, &first_non_object);
       // Return non-zero (eax (not rax) is not zero)
@@ -2240,7 +2273,7 @@
   // Generate the number comparison code.
   if (include_number_compare_) {
     Label non_number_comparison;
-    Label unordered;
+    NearLabel unordered;
     FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
     __ xorl(rax, rax);
     __ xorl(rcx, rcx);
@@ -2304,7 +2337,7 @@
     // Not strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label not_both_objects, return_unequal;
+    NearLabel not_both_objects, return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
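The "add the two" test relies on V8's pointer tagging: smis carry a 0 in the
low bit and heap object pointers a 1, so the low bit of the sum distinguishes
the mixed case. A quick check of that arithmetic (illustrative values, not
real tagged words):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kSmiTagMask = 1;
      intptr_t smi = 42 << 1;          // Low bit 0, as for a tagged smi.
      intptr_t heap_object = 0x1001;   // Low bit 1, as for a heap pointer.
      assert(((smi + heap_object) & kSmiTagMask) == 1);          // Mixed.
      assert(((heap_object + heap_object) & kSmiTagMask) == 0);  // No smi.
      return 0;
    }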
@@ -2462,7 +2495,7 @@
   // Before returning we restore the context from the frame pointer if not NULL.
   // The frame pointer is NULL in the exception handler of a JS entry frame.
   __ xor_(rsi, rsi);  // tentatively set context pointer to NULL
-  Label skip;
+  NearLabel skip;
   __ cmpq(rbp, Immediate(0));
   __ j(equal, &skip);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2622,7 +2655,7 @@
   // Handling of failure.
   __ bind(&failure_returned);
 
-  Label retry;
+  NearLabel retry;
   // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
@@ -2662,7 +2695,7 @@
   __ movq(rsp, Operand(kScratchRegister, 0));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label loop, done;
+  NearLabel loop, done;
   __ bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
@@ -2932,7 +2965,7 @@
   // rdx is function, rax is map.
 
   // Look up the function and the map in the instanceof cache.
-  Label miss;
+  NearLabel miss;
   __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
   __ j(not_equal, &miss);
   __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
@@ -2960,7 +2993,7 @@
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
 
   // Loop through the prototype chain looking for the function prototype.
-  Label loop, is_instance, is_not_instance;
+  NearLabel loop, is_instance, is_not_instance;
   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   __ bind(&loop);
   __ cmpq(rcx, rbx);
@@ -3001,7 +3034,8 @@
          | RegisterField::encode(false)    // lhs_ and rhs_ are not used
          | StrictField::encode(strict_)
          | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_);
+         | IncludeNumberCompareField::encode(include_number_compare_)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
@@ -3041,12 +3075,18 @@
     include_number_compare_name = "_NO_NUMBER";
   }
 
+  const char* include_smi_compare_name = "";
+  if (!include_smi_compare_) {
+    include_smi_compare_name = "_NO_SMI";
+  }
+
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
                "CompareStub_%s%s%s%s",
                cc_name,
                strict_name,
                never_nan_nan_name,
-               include_number_compare_name);
+               include_number_compare_name,
+               include_smi_compare_name);
   return name_;
 }
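For reference, the name built above concatenates one fragment per omitted
fast path; a strict equality stub with both the number and smi code compiled
out would come out roughly as follows (the cc_name strings are defined
earlier in GetName and "EQ" is assumed here for illustration):

    #include <cstdio>

    int main() {
      char name[64];
      std::snprintf(name, sizeof(name), "CompareStub_%s%s%s%s%s",
                    "EQ", "_STRICT", "", "_NO_NUMBER", "_NO_SMI");
      std::puts(name);  // Prints: CompareStub_EQ_STRICT_NO_NUMBER_NO_SMI
      return 0;
    }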
 
@@ -3265,7 +3305,7 @@
   // rax: first string
   // rdx: second string
   // Check if either of the strings are empty. In that case return the other.
-  Label second_not_zero_length, both_not_zero_length;
+  NearLabel second_not_zero_length, both_not_zero_length;
   __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
   __ SmiTest(rcx);
   __ j(not_zero, &second_not_zero_length);
@@ -3303,7 +3343,7 @@
 
   // Look at the length of the result of adding the two strings.
   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
-  __ SmiAdd(rbx, rbx, rcx, NULL);
+  __ SmiAdd(rbx, rbx, rcx);
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
   __ SmiCompare(rbx, Smi::FromInt(2));
@@ -3521,7 +3561,7 @@
   ASSERT(count.is(rcx));  // rep movs count
 
   // Nothing to do for zero characters.
-  Label done;
+  NearLabel done;
   __ testl(count, count);
   __ j(zero, &done);
 
@@ -3532,7 +3572,7 @@
   }
 
   // Don't enter the rep movs if there are fewer than 8 bytes to copy.
-  Label last_bytes;
+  NearLabel last_bytes;
   __ testl(count, Immediate(~7));
   __ j(zero, &last_bytes);
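The testl against ~7 above is a quick "at least 8 bytes remain" check: any
bit set above the low three means the rep movs path is worthwhile. As plain
C++:

    #include <cstdint>

    // True when count has a bit set above the low three, i.e. count >= 8.
    bool AtLeastEightBytes(uint32_t count) { return (count & ~7u) != 0; }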
 
@@ -3576,7 +3616,7 @@
 
   // Make sure that both characters are not digits, as such strings have a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  Label not_array_index;
+  NearLabel not_array_index;
   __ leal(scratch, Operand(c1, -'0'));
   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(above, &not_array_index);
@@ -3763,7 +3803,7 @@
   __ movq(rdx, Operand(rsp, kFromOffset));
   __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
 
-  __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
+  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
   __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
   Label return_rax;
   __ j(equal, &return_rax);
@@ -3896,21 +3936,20 @@
   __ movq(scratch4, scratch1);
   __ SmiSub(scratch4,
             scratch4,
-            FieldOperand(right, String::kLengthOffset),
-            NULL);
+            FieldOperand(right, String::kLengthOffset));
   // Register scratch4 now holds left.length - right.length.
   const Register length_difference = scratch4;
-  Label left_shorter;
+  NearLabel left_shorter;
   __ j(less, &left_shorter);
   // The right string isn't longer than the left one.
   // Get the right string's length by subtracting the (non-negative) difference
   // from the left string's length.
-  __ SmiSub(scratch1, scratch1, length_difference, NULL);
+  __ SmiSub(scratch1, scratch1, length_difference);
   __ bind(&left_shorter);
   // Register scratch1 now holds Min(left.length, right.length).
   const Register min_length = scratch1;
 
-  Label compare_lengths;
+  NearLabel compare_lengths;
   // If min-length is zero, go directly to comparing lengths.
   __ SmiTest(min_length);
   __ j(zero, &compare_lengths);
@@ -3918,7 +3957,7 @@
   __ SmiToInteger32(min_length, min_length);
 
   // Registers scratch2 and scratch3 are free.
-  Label result_not_equal;
+  NearLabel result_not_equal;
   Label loop;
   {
     // Check characters 0 .. min_length - 1 in a loop.
@@ -3954,7 +3993,7 @@
   __ Move(rax, Smi::FromInt(EQUAL));
   __ ret(0);
 
-  Label result_greater;
+  NearLabel result_greater;
   __ bind(&result_not_equal);
   // Unequal comparison of left to right, either character or length.
   __ j(greater, &result_greater);
@@ -3982,7 +4021,7 @@
   __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
 
   // Check for identity.
-  Label not_same;
+  NearLabel not_same;
   __ cmpq(rdx, rax);
   __ j(not_equal, &not_same);
   __ Move(rax, Smi::FromInt(EQUAL));
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b1dd45e..0d8b827 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -248,7 +248,7 @@
       // the function.
       for (int i = 0; i < scope()->num_parameters(); i++) {
         Variable* par = scope()->parameter(i);
-        Slot* slot = par->slot();
+        Slot* slot = par->AsSlot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           // The use of SlotOperand below is safe in unspilled code
           // because the slot is guaranteed to be a context slot.
@@ -284,7 +284,7 @@
     // Initialize ThisFunction reference if present.
     if (scope()->is_function_scope() && scope()->function() != NULL) {
       frame_->Push(Factory::the_hole_value());
-      StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
+      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
     }
 
     // Initialize the function return target after the locals are set
@@ -601,10 +601,10 @@
     Property property(&global, &key, RelocInfo::kNoPosition);
     Reference ref(this, &property);
     ref.GetValue();
-  } else if (variable != NULL && variable->slot() != NULL) {
+  } else if (variable != NULL && variable->AsSlot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -643,17 +643,17 @@
     frame_->Push(&result);
   }
 
-  Variable* arguments = scope()->arguments()->var();
-  Variable* shadow = scope()->arguments_shadow()->var();
-  ASSERT(arguments != NULL && arguments->slot() != NULL);
-  ASSERT(shadow != NULL && shadow->slot() != NULL);
+  Variable* arguments = scope()->arguments();
+  Variable* shadow = scope()->arguments_shadow();
+  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
+  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
   JumpTarget done;
   bool skip_arguments = false;
   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
     // We have to skip storing into the arguments slot if it has
     // already been written to. This can happen if a function
     // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
     Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
@@ -666,10 +666,10 @@
     }
   }
   if (!skip_arguments) {
-    StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
     if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
   }
-  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
   return frame_->Pop();
 }
 
@@ -726,7 +726,7 @@
       LoadGlobal();
       ref->set_type(Reference::NAMED);
     } else {
-      ASSERT(var->slot() != NULL);
+      ASSERT(var->AsSlot() != NULL);
       ref->set_type(Reference::SLOT);
     }
   } else {
@@ -1350,11 +1350,14 @@
                                           overwrite_mode);
 
     Label do_op;
+    // The left operand must stay unchanged in left->reg() for the deferred
+    // code, so the inline code below works on the copy in answer.reg(),
+    // possibly converted to int32.
+    __ movq(answer.reg(), left->reg());
     if (right_type_info.IsSmi()) {
       if (FLAG_debug_code) {
         __ AbortIfNotSmi(right->reg());
       }
-      __ movq(answer.reg(), left->reg());
       // If left is not known to be a smi, check if it is.
       // If left is not known to be a number, and it isn't a smi, check if
       // it is a HeapNumber.
@@ -1371,7 +1374,7 @@
                      FieldOperand(answer.reg(), HeapNumber::kValueOffset));
         // Branch if we might have overflowed.
         // (False negative for Smi::kMinValue)
-        __ cmpq(answer.reg(), Immediate(0x80000000));
+        __ cmpl(answer.reg(), Immediate(0x80000000));
         deferred->Branch(equal);
         // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
         __ Integer32ToSmi(answer.reg(), answer.reg());
@@ -1390,18 +1393,18 @@
     // Perform the operation.
     switch (op) {
       case Token::SAR:
-        __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
+        __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
         break;
       case Token::SHR: {
         __ SmiShiftLogicalRight(answer.reg(),
-                              left->reg(),
-                              rcx,
-                              deferred->entry_label());
+                                answer.reg(),
+                                rcx,
+                                deferred->entry_label());
         break;
       }
       case Token::SHL: {
         __ SmiShiftLeft(answer.reg(),
-                        left->reg(),
+                        answer.reg(),
                         rcx);
         break;
       }
@@ -1940,6 +1943,19 @@
 }
 
 
+static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
+                                        bool inline_number_compare) {
+  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
+  if (nan_info == kCantBothBeNaN) {
+    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
+  }
+  if (inline_number_compare) {
+    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
+  }
+  return flags;
+}
+
+
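The bit-or composition in ComputeCompareFlags above presumes or-able flag
values along these lines (the real enum lives in code-stubs.h, which is
outside this excerpt; the values below are an assumption for illustration):

    // Assumed shape of the CompareStub flag enum; illustrative only.
    enum CompareFlags {
      NO_COMPARE_FLAGS = 0,
      NO_SMI_COMPARE_IN_STUB = 1 << 0,
      NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
      CANT_BOTH_BE_NAN = 1 << 2
    };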
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
@@ -2070,7 +2086,9 @@
 
       // Setup and call the compare stub.
       is_not_string.Bind(&left_side);
-      CompareStub stub(cc, strict, kCantBothBeNaN);
+      CompareFlags flags =
+          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
+      CompareStub stub(cc, strict, flags);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
       result.ToRegister();
       __ testq(result.reg(), result.reg());
@@ -2174,7 +2192,8 @@
 
       // End of in-line compare, call out to the compare stub. Don't include
       // number comparison in the stub if it was inlined.
-      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
+      CompareStub stub(cc, strict, flags);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
       answer.Unuse();
@@ -2207,7 +2226,9 @@
 
         // End of in-line compare, call out to the compare stub. Don't include
         // number comparison in the stub if it was inlined.
-        CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+        CompareFlags flags =
+            ComputeCompareFlags(nan_info, inline_number_compare);
+        CompareStub stub(cc, strict, flags);
         Result answer = frame_->CallStub(&stub, &left_side, &right_side);
         __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
         answer.Unuse();
@@ -2332,7 +2353,9 @@
       }
 
       // Setup and call the compare stub.
-      CompareStub stub(cc, strict, kCantBothBeNaN);
+      CompareFlags flags =
+          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
+      CompareStub stub(cc, strict, flags);
       Result result = frame_->CallStub(&stub, left_side, right_side);
       result.ToRegister();
       __ testq(result.reg(), result.reg());
@@ -2476,7 +2499,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -2737,7 +2760,7 @@
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
 
   // If it was not possible to allocate the variable at compile time,
   // we need to "declare" it at runtime to make sure it actually
@@ -3415,7 +3438,7 @@
   // Set number type of the loop variable to smi.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
 
-  SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+  SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
   Visit(node->body());
 
   if (node->continue_target()->is_linked()) {
@@ -3424,7 +3447,7 @@
 
   if (has_valid_frame()) {
     CodeForStatementPosition(node);
-    Slot* loop_var_slot = loop_var->slot();
+    Slot* loop_var_slot = loop_var->AsSlot();
     if (loop_var_slot->type() == Slot::LOCAL) {
       frame_->TakeLocalAt(loop_var_slot->index());
     } else {
@@ -3898,8 +3921,8 @@
 
   // Store the caught exception in the catch variable.
   Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
-  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
+  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
+  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
 
   // Remove the exception from the stack.
   frame_->Drop();
@@ -4497,7 +4520,7 @@
     done->Jump(result);
 
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -4532,7 +4555,7 @@
           Result arguments = allocator()->Allocate();
           ASSERT(arguments.is_valid());
           __ movq(arguments.reg(),
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                     arguments,
                                                     slow));
           frame_->Push(&arguments);
@@ -4998,7 +5021,7 @@
   Comment cmnt(masm(), "[ Variable Assignment");
   Variable* var = node->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   ASSERT(slot != NULL);
 
   // Evaluate the right-hand side.
@@ -5343,14 +5366,14 @@
     // in generated code. If we succeed, there is no need to perform a
     // context lookup in the runtime system.
     JumpTarget done;
-    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->slot()->type() == Slot::LOOKUP);
+    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
       JumpTarget slow;
       // Prepare the stack for the call to
       // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
       // function, the first argument to the eval call and the
       // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(),
+      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
                                                      NOT_INSIDE_TYPEOF,
                                                      &slow);
       frame_->Push(&fun);
@@ -5434,8 +5457,8 @@
     // Replace the function on the stack with the result.
     frame_->Push(&result);
 
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // ----------------------------------
     // JavaScript examples:
     //
@@ -5454,7 +5477,7 @@
     // Generate fast case for loading functions from slots that
     // correspond to local/global variables or arguments unless they
     // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &function,
                                     &slow,
@@ -7317,7 +7340,7 @@
 
     Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
     if (variable != NULL) {
-      Slot* slot = variable->slot();
+      Slot* slot = variable->AsSlot();
       if (variable->is_global()) {
         LoadGlobal();
         frame_->Push(variable->name());
@@ -7395,6 +7418,7 @@
         GenericUnaryOpStub stub(
             Token::SUB,
             overwrite,
+            NO_UNARY_FLAGS,
             no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
@@ -7413,7 +7437,9 @@
         Condition is_smi = masm_->CheckSmi(operand.reg());
         smi_label.Branch(is_smi, &operand);
 
-        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+        GenericUnaryOpStub stub(Token::BIT_NOT,
+                                overwrite,
+                                NO_UNARY_SMI_CODE_IN_STUB);
         Result answer = frame_->CallStub(&stub, &operand);
         continue_label.Jump(&answer);
 
@@ -8670,7 +8696,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       ASSERT(slot != NULL);
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       break;
@@ -8723,7 +8749,7 @@
     return;
   }
 
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
   ASSERT(slot != NULL);
   if (slot->type() == Slot::LOOKUP ||
       slot->type() == Slot::CONTEXT ||
@@ -8756,7 +8782,7 @@
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
       set_unloaded();
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 07bdadf..6e1dd72 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -343,15 +343,17 @@
   bool in_spilled_code() const { return in_spilled_code_; }
   void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
 
-  // If the name is an inline runtime function call return the number of
-  // expected arguments. Otherwise return -1.
-  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
   static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
 
  private:
+  // Type of a member function that generates inline code for a native
+  // function.
+  typedef void (CodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
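The typedef above replaces the old name-keyed lookup table with an array of
member function pointers indexed by Runtime::FunctionId; dispatch through
such a table looks roughly like this (GenerateInlineCall is a hypothetical
name used only for illustration):

    // Hypothetical dispatch through the generator table; not V8 code.
    void CodeGenerator::GenerateInlineCall(int index,
                                           ZoneList<Expression*>* args) {
      InlineFunctionGenerator generator = kInlineFunctionGenerators[index];
      (this->*generator)(args);  // Call through the member pointer.
    }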
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
@@ -584,12 +586,9 @@
 
   void CheckStack();
 
-  struct InlineRuntimeLUT {
-    void (CodeGenerator::*method)(ZoneList<Expression*>*);
-    const char* name;
-    int nargs;
-  };
-  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+  static InlineFunctionGenerator FindInlineFunctionGenerator(
+      Runtime::FunctionId function_id);
+
   bool CheckForInlineRuntimeCall(CallRuntime* node);
 
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -742,8 +741,6 @@
   // in a spilled state.
   bool in_spilled_code_;
 
-  static InlineRuntimeLUT kInlineRuntimeLUT[];
-
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7c9dfc1..4213912 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -891,6 +891,10 @@
 
     case 0xD9:
       switch (modrm_byte & 0xF8) {
+        case 0xC0:
+          mnem = "fld";
+          has_register = true;
+          break;
         case 0xC8:
           mnem = "fxch";
           has_register = true;
@@ -901,6 +905,7 @@
             case 0xE1: mnem = "fabs"; break;
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
+            case 0xEB: mnem = "fldpi"; break;
             case 0xEE: mnem = "fldz"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
@@ -1059,6 +1064,21 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
       current += PrintRightOperand(current);
+    } else if (opcode == 0x2C) {
+      // CVTTSD2SI:
+      // Convert with truncation scalar double-precision FP to integer.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("cvttsd2si%c %s,",
+          operand_size_code(), NameOfCPURegister(regop));
+      current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x2D) {
+      // CVTSD2SI: Convert scalar double-precision FP to integer.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("cvtsd2si%c %s,",
+          operand_size_code(), NameOfCPURegister(regop));
+      current += PrintRightXMMOperand(current);
     } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
       // XMM arithmetic. Mnemonic was retrieved at the start of this function.
       int mod, regop, rm;
@@ -1089,11 +1109,14 @@
       AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
       current += PrintRightOperand(current);
     } else if (opcode == 0x2C) {
-      // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+      // CVTTSS2SI:
+      // Convert with truncation scalar single-precision FP to dword integer.
       // Assert that mod is not 3, so source is memory, not an XMM register.
       ASSERT_NE(0xC0, *current & 0xC0);
       current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
     } else if (opcode == 0x5A) {
+      // CVTSS2SD:
+      // Convert scalar single-precision FP to scalar double-precision FP.
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
@@ -1450,12 +1473,12 @@
         data++;
         int mod, regop, rm;
         get_modrm(*data, &mod, &regop, &rm);
-        if (mod == 3 && regop == 1) {
-          AppendToBuffer("decb %s", NameOfCPURegister(rm));
+        if (regop == 1) {
+          AppendToBuffer("decb ");
+          data += PrintRightOperand(data);
         } else {
           UnimplementedInstruction();
         }
-        data++;
       }
         break;
 
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fd26535..9c96047 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -35,18 +35,8 @@
 namespace internal {
 
 
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute the stack pointer.
-  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-  // Fill in the state.
-  state->fp = fp;
-  state->sp = sp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
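The removed boilerplate presumably moved into a platform-independent
ExitFrame::GetStateForFramePointer that calls the new ComputeStackPointer
hook; a sketch of that shared caller (assumed, since frames.cc is not part
of this excerpt):

    // Assumed shared implementation in src/frames.cc; illustrative only.
    StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp,
                                                        State* state) {
      if (fp == 0) return NONE;
      Address sp = ComputeStackPointer(fp);  // Platform-specific hook.
      FillState(fp, sp, state);              // Assumed state-filling helper.
      return EXIT;
    }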
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 40e1e35..c15860c 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -100,7 +100,7 @@
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
     for (int i = 0; i < num_parameters; i++) {
-      Slot* slot = scope()->parameter(i)->slot();
+      Slot* slot = scope()->parameter(i)->AsSlot();
       if (slot != NULL && slot->type() == Slot::CONTEXT) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
@@ -119,7 +119,7 @@
   }
 
   // Possibly allocate an arguments object.
-  Variable* arguments = scope()->arguments()->AsVariable();
+  Variable* arguments = scope()->arguments();
   if (arguments != NULL) {
     // Arguments object must be allocated after the context object, in
     // case the "arguments" or ".arguments" variables are in the context.
@@ -143,9 +143,8 @@
     __ CallStub(&stub);
     // Store new arguments object in both "arguments" and ".arguments" slots.
     __ movq(rcx, rax);
-    Move(arguments->slot(), rax, rbx, rdx);
-    Slot* dot_arguments_slot =
-        scope()->arguments_shadow()->AsVariable()->slot();
+    Move(arguments->AsSlot(), rax, rbx, rdx);
+    Slot* dot_arguments_slot = scope()->arguments_shadow()->AsSlot();
     Move(dot_arguments_slot, rcx, rbx, rdx);
   }
 
@@ -165,7 +164,7 @@
   }
 
   { Comment cmnt(masm_, "[ Stack check");
-    Label ok;
+    NearLabel ok;
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(above_equal, &ok);
     StackCheckStub stub;
@@ -237,221 +236,196 @@
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
 
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
 
-    case Expression::kValue:
-      // Move value into place.
-      switch (location_) {
-        case kAccumulator:
-          if (!reg.is(result_register())) __ movq(result_register(), reg);
-          break;
-        case kStack:
-          __ push(reg);
-          break;
-      }
-      break;
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+  __ movq(result_register(), slot_operand);
+}
 
-    case Expression::kTest:
-      // For simplicity we always test the accumulator register.
-      if (!reg.is(result_register())) __ movq(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+  MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
+  __ push(slot_operand);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+  codegen()->Move(result_register(), slot);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ PushRoot(index);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    __ jmp(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    __ jmp(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
   }
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-    case Expression::kValue: {
-      MemOperand slot_operand = EmitSlotSearch(slot, result_register());
-      switch (location_) {
-        case kAccumulator:
-          __ movq(result_register(), slot_operand);
-          break;
-        case kStack:
-          // Memory operands can be pushed directly.
-          __ push(slot_operand);
-          break;
-      }
-      break;
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ Move(result_register(), lit);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  __ Push(lit);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    __ jmp(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    __ jmp(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      __ jmp(false_label_);
+    } else {
+      __ jmp(true_label_);
     }
-
-    case Expression::kTest:
-      Move(result_register(), slot);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      __ jmp(false_label_);
+    } else {
+      __ jmp(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ Move(result_register(), lit);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
   }
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-    case Expression::kEffect:
-      // Nothing to do.
-      break;
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ Move(result_register(), lit->handle());
-          break;
-        case kStack:
-          __ Push(lit->handle());
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      __ Move(result_register(), lit->handle());
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(1);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ pop(result_register());
-          break;
-        case kStack:
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      __ pop(result_register());
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
-}
-
-
-void FullCodeGenerator::DropAndApply(int count,
-                                     Expression::Context context,
-                                     Register reg) {
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
   ASSERT(count > 0);
-  ASSERT(!reg.is(rsp));
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-
-    case Expression::kEffect:
-      __ Drop(count);
-      break;
-
-    case Expression::kValue:
-      switch (location_) {
-        case kAccumulator:
-          __ Drop(count);
-          if (!reg.is(result_register())) __ movq(result_register(), reg);
-          break;
-        case kStack:
-          if (count > 1) __ Drop(count - 1);
-          __ movq(Operand(rsp, 0), reg);
-          break;
-      }
-      break;
-
-    case Expression::kTest:
-      __ Drop(count);
-      if (!reg.is(result_register())) __ movq(result_register(), reg);
-      DoTest(true_label_, false_label_, fall_through_);
-      break;
-  }
+  __ Drop(count);
 }
 
 
-void FullCodeGenerator::Apply(Expression::Context context,
-                              Label* materialize_true,
-                              Label* materialize_false) {
-  switch (context) {
-    case Expression::kUninitialized:
-
-    case Expression::kEffect:
-      ASSERT_EQ(materialize_true, materialize_false);
-      __ bind(materialize_true);
-      break;
-
-    case Expression::kValue: {
-      Label done;
-      switch (location_) {
-        case kAccumulator:
-          __ bind(materialize_true);
-          __ Move(result_register(), Factory::true_value());
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ Move(result_register(), Factory::false_value());
-          break;
-        case kStack:
-          __ bind(materialize_true);
-          __ Push(Factory::true_value());
-          __ jmp(&done);
-          __ bind(materialize_false);
-          __ Push(Factory::false_value());
-          break;
-      }
-      __ bind(&done);
-      break;
-    }
-
-    case Expression::kTest:
-      break;
-  }
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
-// Convert constant control flow (true or false) to the result expected for
-// a given expression context.
-void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
-  switch (context) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      break;
-    case Expression::kValue: {
-      Heap::RootListIndex value_root_index =
-          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
-      switch (location_) {
-        case kAccumulator:
-          __ LoadRoot(result_register(), value_root_index);
-          break;
-        case kStack:
-          __ PushRoot(value_root_index);
-          break;
-      }
-      break;
-    }
-    case Expression::kTest:
-      if (flag) {
-        if (true_label_ != fall_through_) __ jmp(true_label_);
-      } else {
-        if (false_label_ != fall_through_) __ jmp(false_label_);
-      }
-      break;
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ movq(Operand(rsp, 0), reg);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  ASSERT_EQ(materialize_true, materialize_false);
+  __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  NearLabel done;
+  __ bind(materialize_true);
+  __ Move(result_register(), Factory::true_value());
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ Move(result_register(), Factory::false_value());
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  NearLabel done;
+  __ bind(materialize_true);
+  __ Push(Factory::true_value());
+  __ jmp(&done);
+  __ bind(materialize_false);
+  __ Push(Factory::false_value());
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  ASSERT(materialize_false == false_label_);
+  ASSERT(materialize_true == true_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ PushRoot(value_root_index);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  if (flag) {
+    if (true_label_ != fall_through_) __ jmp(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   }
 }
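The hunk above replaces the per-call-site switches over Expression::Context
with virtual dispatch on small context objects, one Plug overload per operand
kind. Schematically (abbreviated names, not the actual declarations in
full-codegen.h):

    // Schematic of the expression-context hierarchy; illustrative only.
    class ExpressionContext {
     public:
      virtual ~ExpressionContext() {}
      // One overload per operand kind: Register, Slot*, literals, bool...
      virtual void Plug(bool flag) const = 0;
    };

    class EffectContext : public ExpressionContext {
     public:
      virtual void Plug(bool flag) const {}  // Value unused; nothing to do.
    };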
 
@@ -544,7 +518,7 @@
                                         FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
-  Slot* slot = variable->slot();
+  Slot* slot = variable->AsSlot();
   Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
@@ -555,7 +529,7 @@
           __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
           __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ movq(Operand(rbp, SlotOffset(slot)), result_register());
         }
         break;
@@ -577,7 +551,7 @@
           __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
           // No write barrier since the hole value is in old space.
         } else if (function != NULL) {
-          VisitForValue(function, kAccumulator);
+          VisitForAccumulatorValue(function);
           __ movq(ContextOperand(rsi, slot->index()), result_register());
           int offset = Context::SlotOffset(slot->index());
           __ movq(rbx, rsi);
@@ -599,7 +573,7 @@
         if (mode == Variable::CONST) {
           __ PushRoot(Heap::kTheHoleValueRootIndex);
         } else if (function != NULL) {
-          VisitForValue(function, kStack);
+          VisitForStackValue(function);
         } else {
           __ Push(Smi::FromInt(0));  // no initial value!
         }
@@ -612,23 +586,20 @@
     if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       if (function != NULL) {
-        VisitForValue(prop->key(), kStack);
-        VisitForValue(function, kAccumulator);
+        VisitForStackValue(prop->key());
+        VisitForAccumulatorValue(function);
         __ pop(rcx);
       } else {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         __ movq(rcx, result_register());
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
       __ pop(rdx);
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // Absence of a test rax instruction following the call
-      // indicates that none of the load was inlined.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
     }
   }
 }
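This and the following hunks all make the same substitution: the
call-plus-nop sequences (the trailing nop told the IC patching machinery that
no inlined fast case follows the call) collapse into one EmitCallIC helper.
Its assumed shape, since the definition is outside this excerpt:

    // Assumed shape of the new helper; the real definition is elsewhere.
    void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
                                       RelocInfo::Mode mode) {
      ASSERT(mode == RelocInfo::CODE_TARGET ||
             mode == RelocInfo::CODE_TARGET_CONTEXT);
      __ call(ic, mode);
      // Any call-site marker (such as the signalling nop) is now emitted
      // here once instead of at every IC call site.
    }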
@@ -654,7 +625,7 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
   // Keep the switch value on the stack until a case matches.
-  VisitForValue(stmt->tag(), kStack);
+  VisitForStackValue(stmt->tag());
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -674,12 +645,13 @@
     next_test.Unuse();
 
     // Compile the label expression.
-    VisitForValue(clause->label(), kAccumulator);
+    VisitForAccumulatorValue(clause->label());
 
     // Perform the comparison as if via '==='.
-    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+    __ movq(rdx, Operand(rsp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    if (inline_smi_code) {
       Label slow_case;
-      __ movq(rdx, Operand(rsp, 0));  // Switch value.
       __ JumpIfNotBothSmi(rdx, rax, &slow_case);
       __ SmiCompare(rdx, rax);
       __ j(not_equal, &next_test);
@@ -688,7 +660,10 @@
       __ bind(&slow_case);
     }
 
-    CompareStub stub(equal, true);
+    CompareFlags flags = inline_smi_code
+        ? NO_SMI_COMPARE_IN_STUB
+        : NO_COMPARE_FLAGS;
+    CompareStub stub(equal, true, flags);
     __ CallStub(&stub);
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
@@ -729,7 +704,7 @@
   // Get the object to enumerate over. Both SpiderMonkey and JSC
   // ignore null and undefined in contrast to the specification; see
   // ECMA-262 section 12.6.4.
-  VisitForValue(stmt->enumerable(), kAccumulator);
+  VisitForAccumulatorValue(stmt->enumerable());
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(equal, &exit);
   __ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -758,7 +733,7 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  Label fixed_array;
+  NearLabel fixed_array;
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kMetaMapRootIndex);
   __ j(not_equal, &fixed_array);
@@ -804,7 +779,7 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  Label update_each;
+  NearLabel update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each);
@@ -867,13 +842,13 @@
     __ Push(info);
     __ CallRuntime(Runtime::kNewClosure, 2);
   }
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  EmitVariableLoad(expr->var(), context_);
+  EmitVariableLoad(expr->var());
 }
 
 
@@ -909,7 +884,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    Label next, fast;
+    NearLabel next, fast;
     if (!context.is(temp)) {
       __ movq(temp, context);
     }
@@ -937,7 +912,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
 }
 
 
@@ -984,7 +959,7 @@
     EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
     __ jmp(done);
   } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
     Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
     if (potential_slot != NULL) {
       // Generate fast case for locals that rewrite to slots.
@@ -1010,11 +985,11 @@
           // variables. Then load the argument from the arguments
           // object using keyed load.
           __ movq(rdx,
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                     slow));
           __ Move(rax, key_literal->handle());
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1023,12 +998,11 @@
 }
 
 
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
-                                         Expression::Context context) {
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
   // Four cases: non-this global variables, lookup slots, all other
   // types of slots, and parameters that rewrite to explicit property
   // accesses on the arguments object.
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   Property* property = var->AsProperty();
 
   if (var->is_global() && !var->is_this()) {
@@ -1038,12 +1012,8 @@
     __ Move(rcx, var->name());
     __ movq(rax, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    // A test rax instruction following the call is used by the IC to
-    // indicate that the inobject property case was inlined.  Ensure there
-    // is no test rax instruction here.
-    __ nop();
-    Apply(context, rax);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    context()->Plug(rax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Label done, slow;
@@ -1059,7 +1029,7 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ bind(&done);
 
-    Apply(context, rax);
+    context()->Plug(rax);
 
   } else if (slot != NULL) {
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
@@ -1068,16 +1038,16 @@
     if (var->mode() == Variable::CONST) {
       // Constants may be the hole value if they have not been initialized.
       // Unhole them.
-      Label done;
+      NearLabel done;
       MemOperand slot_operand = EmitSlotSearch(slot, rax);
       __ movq(rax, slot_operand);
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
       __ j(not_equal, &done);
       __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
       __ bind(&done);
-      Apply(context, rax);
+      context()->Plug(rax);
     } else {
-      Apply(context, slot);
+      context()->Plug(slot);
     }
 
   } else {
@@ -1088,7 +1058,7 @@
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
     ASSERT_NOT_NULL(object_var);
-    Slot* object_slot = object_var->slot();
+    Slot* object_slot = object_var->AsSlot();
     ASSERT_NOT_NULL(object_slot);
 
     // Load the object.
@@ -1105,11 +1075,8 @@
 
     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    // Notice: We must not have a "test rax, ..." instruction after the
-    // call. It is treated specially by the LoadIC code.
-    __ nop();
-    Apply(context, rax);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    context()->Plug(rax);
   }
 }
 
@@ -1164,7 +1131,7 @@
     __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
     __ movq(FieldOperand(rax, size - kPointerSize), rdx);
   }
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1203,38 +1170,37 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForValue(value, kAccumulator);
+          VisitForAccumulatorValue(value);
           __ Move(rcx, key->handle());
           __ movq(rdx, Operand(rsp, 0));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
-          __ nop();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(rsp, 0));  // Duplicate receiver.
-        VisitForValue(key, kStack);
-        VisitForValue(value, kStack);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kSetProperty, 3);
         break;
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
         __ push(Operand(rsp, 0));  // Duplicate receiver.
-        VisitForValue(key, kStack);
+        VisitForStackValue(key);
         __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
                 Smi::FromInt(1) :
                 Smi::FromInt(0));
-        VisitForValue(value, kStack);
+        VisitForStackValue(value);
         __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
     }
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, rax);
+    context()->Plug(rax);
   }
 }
 
@@ -1281,7 +1247,7 @@
       __ push(rax);
       result_saved = true;
     }
-    VisitForValue(subexpr, kAccumulator);
+    VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
     __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
@@ -1294,9 +1260,9 @@
   }
 
   if (result_saved) {
-    ApplyTOS(context_);
+    context()->PlugTOS();
   } else {
-    Apply(context_, rax);
+    context()->Plug(rax);
   }
 }
 
@@ -1329,39 +1295,38 @@
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
         // We need the receiver both on the stack and in the accumulator.
-        VisitForValue(property->obj(), kAccumulator);
+        VisitForAccumulatorValue(property->obj());
         __ push(result_register());
       } else {
-        VisitForValue(property->obj(), kStack);
+        VisitForStackValue(property->obj());
       }
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kAccumulator);
+        VisitForStackValue(property->obj());
+        VisitForAccumulatorValue(property->key());
         __ movq(rdx, Operand(rsp, 0));
         __ push(rax);
       } else {
-        VisitForValue(property->obj(), kStack);
-        VisitForValue(property->key(), kStack);
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
       }
       break;
   }
 
   if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    switch (assign_type) {
-      case VARIABLE:
-        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
-                         Expression::kValue);
-        break;
-      case NAMED_PROPERTY:
-        EmitNamedPropertyLoad(property);
-        break;
-      case KEYED_PROPERTY:
-        EmitKeyedPropertyLoad(property);
-        break;
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          break;
+      }
     }
 
     Token::Value op = expr->binary_op();
@@ -1371,28 +1336,26 @@
     ASSERT(constant == kRightConstant || constant == kNoConstants);
     if (constant == kNoConstants) {
       __ push(rax);  // Left operand goes on the stack.
-      VisitForValue(expr->value(), kAccumulator);
+      VisitForAccumulatorValue(expr->value());
     }
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
         ? OVERWRITE_RIGHT
         : NO_OVERWRITE;
     SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
       EmitInlineSmiBinaryOp(expr,
                             op,
-                            Expression::kValue,
                             mode,
                             expr->target(),
                             expr->value(),
                             constant);
     } else {
-      EmitBinaryOp(op, Expression::kValue, mode);
+      EmitBinaryOp(op, mode);
     }
-    location_ = saved_location;
-
   } else {
-    VisitForValue(expr->value(), kAccumulator);
+    VisitForAccumulatorValue(expr->value());
   }
 
   // Record source position before possible IC call.
@@ -1402,8 +1365,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             expr->op(),
-                             context_);
+                             expr->op());
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1420,22 +1382,19 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                               Token::Value op,
-                                              Expression::Context context,
                                               OverwriteMode mode,
                                               Expression* left,
                                               Expression* right,
@@ -1497,12 +1456,11 @@
   }
 
   __ bind(&done);
-  Apply(context, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
-                                     Expression::Context context,
                                      OverwriteMode mode) {
   GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
   if (stub.ArgsInRegistersSupported()) {
@@ -1512,7 +1470,7 @@
     __ push(result_register());
     __ CallStub(&stub);
   }
-  Apply(context, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1538,30 +1496,29 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* var = expr->AsVariableProxy()->var();
-      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
       break;
     }
     case NAMED_PROPERTY: {
       __ push(rax);  // Preserve value.
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ movq(rdx, rax);
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(rax);  // Preserve value.
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ movq(rcx, rax);
       __ pop(rdx);
       __ pop(rax);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1569,12 +1526,11 @@
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
-                                               Token::Value op,
-                                               Expression::Context context) {
+                                               Token::Value op) {
   // Left-hand sides that rewrite to explicit property accesses do not reach
   // here.
   ASSERT(var != NULL);
-  ASSERT(var->is_global() || var->slot() != NULL);
+  ASSERT(var->is_global() || var->AsSlot() != NULL);
 
   if (var->is_global()) {
     ASSERT(!var->is_this());
@@ -1584,14 +1540,13 @@
     __ Move(rcx, var->name());
     __ movq(rdx, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
     // of const variables.  Const assignments are simply skipped.
     Label done;
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
@@ -1640,7 +1595,7 @@
     __ bind(&done);
   }
 
-  Apply(context, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1669,8 +1624,7 @@
     __ pop(rdx);
   }
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1678,9 +1632,9 @@
     __ push(Operand(rsp, kPointerSize));  // Receiver is under value.
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(rax);
-    DropAndApply(1, context_, rax);
+    context()->DropAndPlug(1, rax);
   } else {
-    Apply(context_, rax);
+    context()->Plug(rax);
   }
 }
 
@@ -1708,10 +1662,7 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // This nop signals to the IC that there is no inlined code at the call
-  // site for it to patch.
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1722,7 +1673,7 @@
     __ pop(rax);
   }
 
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1731,16 +1682,15 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForValue(expr->obj(), kAccumulator);
+    VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
-    Apply(context_, rax);
   } else {
-    VisitForValue(expr->obj(), kStack);
-    VisitForValue(expr->key(), kAccumulator);
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
     __ pop(rdx);
     EmitKeyedPropertyLoad(expr);
-    Apply(context_, rax);
   }
+  context()->Plug(rax);
 }
 
 
@@ -1751,7 +1701,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   __ Move(rcx, name);
   // Record source position for debugger.
@@ -1760,10 +1710,10 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                          in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1774,9 +1724,9 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
-  VisitForValue(key, kAccumulator);
+  VisitForAccumulatorValue(key);
   __ movq(rcx, rax);
   // Record source position for debugger.
   SetSourcePosition(expr->position());
@@ -1784,10 +1734,10 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
                                                               in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -1796,7 +1746,7 @@
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
@@ -1806,7 +1756,7 @@
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
-  DropAndApply(1, context_, rax);
+  context()->DropAndPlug(1, rax);
 }
 
 
@@ -1820,14 +1770,14 @@
     // resolve the function we need to call and the receiver of the
     // call.  Then we call the resolved function using the given
     // arguments.
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     __ PushRoot(Heap::kUndefinedValueRootIndex);  // Reserved receiver slot.
 
     // Push the arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
-      VisitForValue(args->at(i), kStack);
+      VisitForStackValue(args->at(i));
     }
 
     // Push copy of the function - found below the arguments.
@@ -1856,20 +1806,20 @@
     __ CallStub(&stub);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    DropAndApply(1, context_, rax);
+    context()->DropAndPlug(1, rax);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Call to a global variable.
     // Push global object as receiver for the call IC lookup.
     __ push(CodeGenerator::GlobalObject());
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLoadFromSlotFastCase(var->slot(),
+    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
                                     NOT_INSIDE_TYPEOF,
                                     &slow,
                                     &done);
@@ -1887,7 +1837,7 @@
     // function and receiver and have the slow path jump around this
     // code.
     if (done.is_linked()) {
-      Label call;
+      NearLabel call;
       __ jmp(&call);
       __ bind(&done);
       // Push function.
@@ -1906,24 +1856,20 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
       // For a synthetic property, use a keyed load IC followed by a function
       // call; for a regular property, use a KeyedCallIC.
-      VisitForValue(prop->obj(), kStack);
+      VisitForStackValue(prop->obj());
       if (prop->is_synthetic()) {
-        VisitForValue(prop->key(), kAccumulator);
+        VisitForAccumulatorValue(prop->key());
         __ movq(rdx, Operand(rsp, 0));
         // Record source code position for IC call.
         SetSourcePosition(prop->position());
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ call(ic, RelocInfo::CODE_TARGET);
-        // By emitting a nop we make sure that we do not have a "test rax,..."
-        // instruction after the call as it is treated specially
-        // by the LoadIC code.
-        __ nop();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         // Pop receiver.
         __ pop(rbx);
         // Push result (function).
@@ -1946,7 +1892,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    VisitForValue(fun, kStack);
+    VisitForStackValue(fun);
     // Load global receiver object.
     __ movq(rbx, CodeGenerator::GlobalObject());
     __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -1965,13 +1911,13 @@
   // Push constructor on the stack.  If it's not a function it's used as
   // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
   // ignored.
-  VisitForValue(expr->expression(), kStack);
+  VisitForStackValue(expr->expression());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   // Call the construct call builtin that handles allocation and
@@ -1984,59 +1930,59 @@
 
   Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
   __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_true);
   __ jmp(if_false);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
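
All of the EmitIs* predicates converted below share one protocol: context()->PrepareTest(...) hands back if_true/if_false/fall_through labels suited to the enclosing context (a test context branches directly; value contexts get materialization labels), and Split(cc, ...) then picks the cheapest branch shape. The Split body is not part of this hunk; a hedged sketch of its assumed contract:

    // Sketch of the assumed Split() contract (not shown in this diff): emit
    // as few jumps as possible given which label, if any, falls through.
    void FullCodeGenerator::Split(Condition cc,
                                  Label* if_true,
                                  Label* if_false,
                                  Label* fall_through) {
      if (if_false == fall_through) {
        __ j(cc, if_true);                    // false case falls through
      } else if (if_true == fall_through) {
        __ j(NegateCondition(cc), if_false);  // true case falls through
      } else {
        __ j(cc, if_true);                    // nothing falls through
        __ jmp(if_false);
      }
    }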
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   Condition positive_smi = __ CheckPositiveSmi(rax);
   Split(positive_smi, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -2052,41 +1998,41 @@
   __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
   Split(below_equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
   Split(above_equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
@@ -2094,7 +2040,7 @@
            Immediate(1 << Map::kIsUndetectable));
   Split(not_zero, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2102,80 +2048,80 @@
     ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
   // used in a few functions in runtime.js which should not normally be hit by
   // this compiler.
   __ jmp(if_false);
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2187,8 +2133,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // Get the frame pointer for the calling frame.
   __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2206,7 +2152,7 @@
                 Smi::FromInt(StackFrame::CONSTRUCT));
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2214,21 +2160,21 @@
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ pop(rbx);
   __ cmpq(rax, rbx);
   Split(equal, if_true, if_false, fall_through);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -2237,19 +2183,19 @@
 
   // ArgumentsAccessStub expects the key in rdx and the formal
   // parameter count in rax.
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
   __ movq(rdx, rax);
   __ Move(rax, Smi::FromInt(scope()->num_parameters()));
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
-  Label exit;
+  NearLabel exit;
   // Get the number of formal parameters.
   __ Move(rax, Smi::FromInt(scope()->num_parameters()));
 
@@ -2265,7 +2211,7 @@
 
   __ bind(&exit);
   if (FLAG_debug_code) __ AbortIfNotSmi(rax);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
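
The Label → NearLabel switches in this and later hunks (exit above; call, done, not_found, and the macro-assembler debug checks further down) all rely on the same property. Assuming the NearLabel of this V8 vintage: it is a Label variant promising that every jump targeting it lands within rel8 range, so the assembler can emit 2-byte short jumps (jcc/jmp rel8) instead of the 6- and 5-byte rel32 encodings, and asserts if the label is bound too far away. A usage sketch under that assumption:

    NearLabel exit;          // every jump to this label must stay within
    __ j(positive, &exit);   // -128..+127 bytes, so a short jcc is emitted
    __ movq(rax, rbx);       // a few bytes of straight-line code: safe
    __ bind(&exit);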
 
 
@@ -2273,7 +2219,7 @@
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
   __ JumpIfSmi(rax, &null);
@@ -2319,7 +2265,7 @@
   // All done.
   __ bind(&done);
 
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2334,14 +2280,14 @@
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
-    VisitForValue(args->at(1), kStack);
-    VisitForValue(args->at(2), kStack);
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
   // Finally, we're expected to leave a value on the top of the stack.
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2378,7 +2324,7 @@
   __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
 
   __ movq(rax, rbx);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2386,11 +2332,11 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2398,19 +2344,19 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
   ASSERT(args->length() == 4);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
-  VisitForValue(args->at(3), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
   // If the object is a smi return the object.
@@ -2421,25 +2367,25 @@
   __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
 
   __ bind(&done);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
   ASSERT(args->length() == 2);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
   __ CallRuntime(Runtime::kMath_pow, 2);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);  // Load the object.
-  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(rbx);  // rax = value. rbx = object.
 
   Label done;
@@ -2458,7 +2404,7 @@
   __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
 
   __ bind(&done);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2466,18 +2412,18 @@
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
 
   NumberToStringStub stub;
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label done;
   StringCharFromCodeGenerator generator(rax, rbx);
@@ -2488,15 +2434,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, rbx);
+  context()->Plug(rbx);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = rbx;
   Register index = rax;
@@ -2535,15 +2481,15 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
 
   Register object = rbx;
   Register index = rax;
@@ -2584,31 +2530,31 @@
   generator.GenerateSlow(masm_, call_helper);
 
   __ bind(&done);
-  Apply(context_, result);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
 
   StringCompareStub stub;
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2616,9 +2562,9 @@
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN);
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2626,18 +2572,18 @@
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS);
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallStub(&stub);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
   ASSERT(args->length() == 1);
-  VisitForValue(args->at(0), kStack);
+  VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2645,38 +2591,38 @@
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForValue(args->at(0), kStack);  // Receiver.
+  VisitForStackValue(args->at(0));  // Receiver.
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i + 1), kStack);
+    VisitForStackValue(args->at(i + 1));
   }
-  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
 
   // InvokeFunction requires function in rdi. Move it in there.
   if (!result_register().is(rdi)) __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kStack);
-  VisitForValue(args->at(2), kStack);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
   __ CallRuntime(Runtime::kSwapElements, 3);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2691,11 +2637,11 @@
   if (jsfunction_result_caches->length() <= cache_id) {
     __ Abort("Attempt to use undefined cache.");
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-    Apply(context_, rax);
+    context()->Plug(rax);
     return;
   }
 
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForAccumulatorValue(args->at(1));
 
   Register key = rax;
   Register cache = rbx;
@@ -2708,7 +2654,7 @@
   __ movq(cache,
           FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
 
-  Label done, not_found;
+  NearLabel done, not_found;
   // tmp now holds finger offset as a smi.
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
@@ -2732,7 +2678,7 @@
   __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
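
EmitGetFromCache above probes only a single entry inline: the cache "finger", i.e. the most recently hit slot, is compared against the key, and any mismatch bails out to Runtime::kGetFromCache, which does the full search (and, presumably, computes and inserts missing results and moves the finger so the next inline probe hits). A standalone model of that policy, with everything V8-specific assumed away:

    // Standalone model, not V8 source: one-entry inline fast path plus a
    // slow path that scans, updates the finger, and keeps repeated keys fast.
    #include <string>
    #include <utility>
    #include <vector>

    struct FingerCache {
      std::vector<std::pair<std::string, int> > entries;
      size_t finger = 0;                          // last slot that hit

      const int* FastProbe(const std::string& key) const {
        if (!entries.empty() && entries[finger].first == key) {
          return &entries[finger].second;         // inline hit
        }
        return nullptr;                           // bail out to the slow path
      }

      int SlowLookup(const std::string& key, int computed_fallback) {
        for (size_t i = 0; i < entries.size(); ++i) {
          if (entries[i].first == key) {
            finger = i;                           // move the finger on a hit
            return entries[i].second;
          }
        }
        entries.push_back(std::make_pair(key, computed_fallback));
        finger = entries.size() - 1;              // new entry becomes the finger
        return computed_fallback;
      }
    };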
 
 
@@ -2743,11 +2689,11 @@
   Register left = rbx;
   Register tmp = rcx;
 
-  VisitForValue(args->at(0), kStack);
-  VisitForValue(args->at(1), kAccumulator);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
   __ pop(left);
 
-  Label done, fail, ok;
+  NearLabel done, fail, ok;
   __ cmpq(left, right);
   __ j(equal, &ok);
   // Fail if either is a non-HeapObject.
@@ -2770,41 +2716,41 @@
   __ Move(rax, Factory::true_value());
   __ bind(&done);
 
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   __ testl(FieldOperand(rax, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
   __ j(zero, if_true);
   __ jmp(if_false);
 
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
-  VisitForValue(args->at(0), kAccumulator);
+  VisitForAccumulatorValue(args->at(0));
 
   __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
   ASSERT(String::kHashShift >= kSmiTagSize);
   __ IndexFromHash(rax, rax);
 
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2828,7 +2774,7 @@
   // Push the arguments ("left-to-right").
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
-    VisitForValue(args->at(i), kStack);
+    VisitForStackValue(args->at(i));
   }
 
   if (expr->is_jsruntime()) {
@@ -2836,13 +2782,13 @@
     __ Move(rcx, expr->name());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-    __ call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
     __ CallRuntime(expr->function(), arg_count);
   }
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
@@ -2856,20 +2802,20 @@
         // Result of deleting non-property, non-variable reference is true.
         // The subexpression may have side effects.
         VisitForEffect(expr->expression());
-        Apply(context_, true);
+        context()->Plug(true);
       } else if (var != NULL &&
                  !var->is_global() &&
-                 var->slot() != NULL &&
-                 var->slot()->type() != Slot::LOOKUP) {
+                 var->AsSlot() != NULL &&
+                 var->AsSlot()->type() != Slot::LOOKUP) {
         // Result of deleting non-global, non-dynamic variables is false.
         // The subexpression does not have side effects.
-        Apply(context_, false);
+        context()->Plug(false);
       } else {
         // Property or variable reference.  Call the delete builtin with
         // object and property name as arguments.
         if (prop != NULL) {
-          VisitForValue(prop->obj(), kStack);
-          VisitForValue(prop->key(), kStack);
+          VisitForStackValue(prop->obj());
+          VisitForStackValue(prop->key());
         } else if (var->is_global()) {
           __ push(CodeGenerator::GlobalObject());
           __ Push(var->name());
@@ -2883,7 +2829,7 @@
           __ Push(var->name());
         }
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        Apply(context_, rax);
+        context()->Plug(rax);
       }
       break;
     }
@@ -2891,26 +2837,7 @@
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          break;
-        case Expression::kValue:
-          switch (location_) {
-            case kAccumulator:
-              __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
-              break;
-            case kStack:
-              __ PushRoot(Heap::kUndefinedValueRootIndex);
-              break;
-          }
-          break;
-        case Expression::kTest:
-          __ jmp(false_label_);
-          break;
-      }
+      context()->Plug(Heap::kUndefinedValueRootIndex);
       break;
     }
 
@@ -2921,31 +2848,33 @@
       Label* if_false = NULL;
       Label* fall_through = NULL;
       // Notice that the labels are swapped.
-      PrepareTest(&materialize_true, &materialize_false,
-                  &if_false, &if_true, &fall_through);
+      context()->PrepareTest(&materialize_true, &materialize_false,
+                             &if_false, &if_true, &fall_through);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
-      Apply(context_, if_false, if_true);  // Labels swapped.
+      context()->Plug(if_false, if_true);  // Labels swapped.
       break;
     }
 
     case Token::TYPEOF: {
       Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
-      VisitForTypeofValue(expr->expression(), kStack);
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
       __ CallRuntime(Runtime::kTypeof, 1);
-      Apply(context_, rax);
+      context()->Plug(rax);
       break;
     }
 
     case Token::ADD: {
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
-      VisitForValue(expr->expression(), kAccumulator);
-      Label no_conversion;
+      VisitForAccumulatorValue(expr->expression());
+      NearLabel no_conversion;
       Condition is_smi = masm_->CheckSmi(result_register());
       __ j(is_smi, &no_conversion);
       __ push(result_register());
       __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
       __ bind(&no_conversion);
-      Apply(context_, result_register());
+      context()->Plug(result_register());
       break;
     }
 
@@ -2954,12 +2883,12 @@
       bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
       UnaryOverwriteMode overwrite =
           can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite);
+      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
       // GenericUnaryOpStub expects the argument to be in the
       // accumulator register rax.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       __ CallStub(&stub);
-      Apply(context_, rax);
+      context()->Plug(rax);
       break;
     }
 
@@ -2967,9 +2896,10 @@
       Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
       // The generic unary operation stub expects the argument to be
       // in the accumulator register rax.
-      VisitForValue(expr->expression(), kAccumulator);
+      VisitForAccumulatorValue(expr->expression());
       Label done;
-      if (ShouldInlineSmiCase(expr->op())) {
+      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
+      if (inline_smi_case) {
         Label call_stub;
         __ JumpIfNotSmi(rax, &call_stub);
         __ SmiNot(rax, rax);
@@ -2979,10 +2909,13 @@
       bool overwrite = expr->expression()->ResultOverwriteAllowed();
       UnaryOverwriteMode mode =
           overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode);
+      UnaryOpFlags flags = inline_smi_case
+          ? NO_UNARY_SMI_CODE_IN_STUB
+          : NO_UNARY_FLAGS;
+      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
       __ CallStub(&stub);
       __ bind(&done);
-      Apply(context_, rax);
+      context()->Plug(rax);
       break;
     }
 
@@ -3018,23 +2951,20 @@
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
-                     Expression::kValue);
-    location_ = saved_location;
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
   } else {
     // Reserve space for result of postfix operation.
-    if (expr->is_postfix() && context_ != Expression::kEffect) {
+    if (expr->is_postfix() && !context()->IsEffect()) {
       __ Push(Smi::FromInt(0));
     }
     if (assign_type == NAMED_PROPERTY) {
-      VisitForValue(prop->obj(), kAccumulator);
+      VisitForAccumulatorValue(prop->obj());
       __ push(rax);  // Copy of receiver, needed for later store.
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
       __ movq(rdx, Operand(rsp, 0));  // Leave receiver on stack
       __ push(rax);  // Copy of key, needed for later store.
       EmitKeyedPropertyLoad(prop);
@@ -3042,7 +2972,7 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  Label no_conversion;
+  NearLabel no_conversion;
   Condition is_smi;
   is_smi = masm_->CheckSmi(rax);
   __ j(is_smi, &no_conversion);
@@ -3052,29 +2982,21 @@
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        // Do not save result.
-        break;
-      case Expression::kValue:
-      case Expression::kTest:
-        // Save the result on the stack. If we have a named or keyed property
-        // we store the result under the receiver that is currently on top
-        // of the stack.
-        switch (assign_type) {
-          case VARIABLE:
-            __ push(rax);
-            break;
-          case NAMED_PROPERTY:
-            __ movq(Operand(rsp, kPointerSize), rax);
-            break;
-          case KEYED_PROPERTY:
-            __ movq(Operand(rsp, 2 * kPointerSize), rax);
-            break;
-        }
-        break;
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(rax);
+          break;
+        case NAMED_PROPERTY:
+          __ movq(Operand(rsp, kPointerSize), rax);
+          break;
+        case KEYED_PROPERTY:
+          __ movq(Operand(rsp, 2 * kPointerSize), rax);
+          break;
+      }
     }
   }
 
@@ -3111,35 +3033,32 @@
     case VARIABLE:
       if (expr->is_postfix()) {
         // Perform the assignment as if via '='.
-        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               Expression::kEffect);
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+        }
         // For all contexts except effect contexts, the result is now on
         // top of the stack.
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
-                               Token::ASSIGN,
-                               context_);
+                               Token::ASSIGN);
       }
       break;
     case NAMED_PROPERTY: {
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, rax);
+        context()->Plug(rax);
       }
       break;
     }
@@ -3147,16 +3066,13 @@
       __ pop(rcx);
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        if (context_ != Expression::kEffect) {
-          ApplyTOS(context_);
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
         }
       } else {
-        Apply(context_, rax);
+        context()->Plug(rax);
       }
       break;
     }
@@ -3164,8 +3080,11 @@
 }
 
 
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
+  ASSERT(!context()->IsEffect());
+  ASSERT(!context()->IsTest());
+
   if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
     Comment cmnt(masm_, "Global variable");
     __ Move(rcx, proxy->name());
@@ -3173,16 +3092,16 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    if (where == kStack) __ push(rax);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    context()->Plug(rax);
   } else if (proxy != NULL &&
-             proxy->var()->slot() != NULL &&
-             proxy->var()->slot()->type() == Slot::LOOKUP) {
+             proxy->var()->AsSlot() != NULL &&
+             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
     Label done, slow;
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
@@ -3191,10 +3110,10 @@
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     __ bind(&done);
 
-    if (where == kStack) __ push(rax);
+    context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitForValue(expr, where);
+    Visit(expr);
   }
 }
 
@@ -3216,7 +3135,10 @@
   if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
   Handle<String> check = Handle<String>::cast(right_literal_value);
 
-  VisitForTypeofValue(left_unary->expression(), kAccumulator);
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(left_unary->expression());
+  }
+
   if (check->Equals(Heap::number_symbol())) {
     Condition is_smi = masm_->CheckSmi(rax);
     __ j(is_smi, if_true);
@@ -3291,8 +3213,8 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3300,21 +3222,21 @@
   Expression* left = expr->left();
   Expression* right = expr->right();
   if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
-    Apply(context_, if_true, if_false);
+    context()->Plug(if_true, if_false);
     return;
   }
 
-  VisitForValue(expr->left(), kStack);
+  VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
       break;
 
     case Token::INSTANCEOF: {
-      VisitForValue(expr->right(), kStack);
+      VisitForStackValue(expr->right());
       InstanceofStub stub;
       __ CallStub(&stub);
       __ testq(rax, rax);
@@ -3324,7 +3246,7 @@
     }
 
     default: {
-      VisitForValue(expr->right(), kAccumulator);
+      VisitForAccumulatorValue(expr->right());
       Condition cc = no_condition;
       bool strict = false;
       switch (op) {
@@ -3361,7 +3283,8 @@
           UNREACHABLE();
       }
 
-      if (ShouldInlineSmiCase(op)) {
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      if (inline_smi_code) {
         Label slow_case;
         __ JumpIfNotBothSmi(rax, rdx, &slow_case);
         __ SmiCompare(rdx, rax);
@@ -3369,7 +3292,10 @@
         __ bind(&slow_case);
       }
 
-      CompareStub stub(cc, strict);
+      CompareFlags flags = inline_smi_code
+          ? NO_SMI_COMPARE_IN_STUB
+          : NO_COMPARE_FLAGS;
+      CompareStub stub(cc, strict, flags);
       __ CallStub(&stub);
       __ testq(rax, rax);
       Split(cc, if_true, if_false, fall_through);
@@ -3378,7 +3304,7 @@
 
   // Convert the result of the comparison into one expected for this
   // expression's context.
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3388,10 +3314,10 @@
   Label* if_true = NULL;
   Label* if_false = NULL;
   Label* fall_through = NULL;
-  PrepareTest(&materialize_true, &materialize_false,
-              &if_true, &if_false, &fall_through);
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
 
-  VisitForValue(expr->expression(), kAccumulator);
+  VisitForAccumulatorValue(expr->expression());
   __ CompareRoot(rax, Heap::kNullValueRootIndex);
   if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
@@ -3407,20 +3333,46 @@
              Immediate(1 << Map::kIsUndetectable));
     Split(not_zero, if_true, if_false, fall_through);
   }
-  Apply(context_, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  Apply(context_, rax);
+  context()->Plug(rax);
 }
 
 
-Register FullCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() {
+  return rax;
+}
 
 
-Register FullCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() {
+  return rsi;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ call(ic, mode);
+
+  // If we're calling a (keyed) load or store stub, we have to mark
+  // the call as containing no inlined code so we will not attempt to
+  // patch it.
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+    case Code::KEYED_LOAD_IC:
+    case Code::STORE_IC:
+    case Code::KEYED_STORE_IC:
+      __ nop();  // Signals no inlined code.
+      break;
+    default:
+      // Do nothing.
+      break;
+  }
+}
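
Folding every IC call into EmitCallIC keeps the marker protocol in one place. As the comments deleted throughout this file explain, the IC machinery inspects the instruction that follows a (keyed) load/store call site: a one-byte nop means "no inlined fast case here, nothing to patch", while a test-rax instruction would mark an inlined site. A standalone sketch of the decoding side, with the real patcher's details assumed:

    #include <cstdint>

    // Sketch only: a real patcher would also recover the inline-site offset
    // from the test instruction's immediate; here we just classify the marker.
    bool HasInlinedFastCase(const uint8_t* after_call) {
      const uint8_t kNop = 0x90;       // x64 one-byte nop: "no inlined code"
      return after_call[0] != kNop;    // e.g. a REX.W test rax, imm32 otherwise
    }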
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index a74e621..62e7691 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -730,7 +730,6 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = rdx;
   Register index = rax;
@@ -745,7 +744,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -753,10 +752,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ ret(0);
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -847,7 +842,7 @@
     // For the UnsignedInt array type, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
-    Label box_int;
+    NearLabel box_int;
 
     __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
 
@@ -1032,7 +1027,7 @@
   // No more bailouts to slow case on this path, so key not needed.
   __ SmiToInteger32(rdi, rax);
   {  // Clamp the value to [0..255].
-    Label done;
+    NearLabel done;
     __ testl(rdi, Immediate(0xFFFFFF00));
     __ j(zero, &done);
     __ setcc(negative, rdi);  // 1 if negative, 0 if positive.
@@ -1082,7 +1077,7 @@
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
-  Label non_smi_value;
+  NearLabel non_smi_value;
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
   __ JumpIfNotSmi(rax, &non_smi_value);
@@ -1104,7 +1099,7 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, check_heap_number;
+  Label slow;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow);
@@ -1145,6 +1140,7 @@
   // rdx: receiver (a JSObject)
   // rbx: elements array
   // rdi: untagged key
+  NearLabel check_heap_number;
   __ JumpIfNotSmi(rax, &check_heap_number);
   // No more branches to slow case on this path.  Key and receiver not needed.
   __ SmiToInteger32(rdx, rax);
@@ -1488,7 +1484,7 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label do_call, slow_call, slow_load;
   Label check_number_dictionary, check_string, lookup_monomorphic_cache;
   Label index_smi, index_string;
 
@@ -1730,6 +1726,14 @@
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(<bug#>): implement this.
+  return false;
+}
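
PatchInlinedContextualLoad is deliberately a stub on x64 for now: returning false tells the IC system that no inlined contextual (global) load exists at the call site, so such loads keep going through the generic stub until the TODO is resolved. A hedged sketch of the assumed caller shape (the real call site in ic.cc is not part of this hunk):

    // Illustrative only; names beyond the signature above are assumptions.
    if (!PatchInlinedContextualLoad(address, map, cell)) {
      // No inline site to specialize; keep dispatching through the IC stub.
    }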
+
+
 // The offset from the inlined patch site to the start of the inlined
 // store instruction.
 const int StoreIC::kOffsetToStoreInstruction = 20;
@@ -1880,7 +1884,7 @@
   //  -- rsp[0] : return address
   // -----------------------------------
 
-  Label miss, restore_miss;
+  Label miss;
 
   GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 165c51d..869986e 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -85,7 +85,7 @@
                                        Register scratch) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
-    Label not_in_new_space;
+    NearLabel not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
@@ -171,7 +171,7 @@
   Label done;
 
   if (FLAG_debug_code) {
-    Label okay;
+    NearLabel okay;
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
@@ -221,42 +221,6 @@
   }
 }
 
-
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch) {
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      movq(kScratchRegister, ExternalReference::new_space_mask());
-      and_(scratch, kScratchRegister);
-    } else {
-      movq(scratch, ExternalReference::new_space_mask());
-      and_(scratch, object);
-    }
-    movq(kScratchRegister, ExternalReference::new_space_start());
-    cmpq(scratch, kScratchRegister);
-    j(cc, branch);
-  } else {
-    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
-    if (scratch.is(object)) {
-      addq(scratch, kScratchRegister);
-    } else {
-      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
-    j(cc, branch);
-  }
-}
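
The InNewSpace body removed here is presumably not gone but re-landed in macro-assembler-x64.h as a template, so the branch target can be either a Label or one of the NearLabels introduced above (RecordWriteHelper already passes one). A declaration sketch under that assumption:

    // Assumed new home in macro-assembler-x64.h (sketch, not verified here):
    template <typename LabelType>
    void InNewSpace(Register object,
                    Register scratch,
                    Condition cc,
                    LabelType* branch);   // Label or NearLabel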
-
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (FLAG_debug_code) Check(cc, msg);
 }
@@ -264,7 +228,7 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (FLAG_debug_code) {
-    Label ok;
+    NearLabel ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
     j(equal, &ok);
@@ -278,7 +242,7 @@
 
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
-  Label L;
+  NearLabel L;
   j(cc, &L);
   Abort(msg);
   // will not return here
@@ -291,7 +255,7 @@
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
-    Label alignment_as_expected;
+    NearLabel alignment_as_expected;
     testq(rsp, Immediate(frame_alignment_mask));
     j(zero, &alignment_as_expected);
     // Abort if stack is not aligned.
@@ -304,7 +268,7 @@
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
-  Label ok;
+  NearLabel ok;
   testl(result, result);
   j(not_zero, &ok);
   testl(op, op);
@@ -642,8 +606,6 @@
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
 
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
 Register MacroAssembler::GetSmiConstant(Smi* source) {
   int value = source->value();
   if (value == 0) {
@@ -666,7 +628,7 @@
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
-      Label ok;
+      NearLabel ok;
       j(equal, &ok);
       int3();
       bind(&ok);
@@ -716,6 +678,7 @@
   }
 }
 
+
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
@@ -725,22 +688,10 @@
 }
 
 
-void MacroAssembler::Integer32ToSmi(Register dst,
-                                    Register src,
-                                    Label* on_overflow) {
-  ASSERT_EQ(0, kSmiTag);
-  // 32-bit integer always fits in a long smi.
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  shl(dst, Immediate(kSmiShift));
-}
-
-
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (FLAG_debug_code) {
     testb(dst, Immediate(0x01));
-    Label ok;
+    NearLabel ok;
     j(zero, &ok);
     if (allow_stub_calls()) {
       Abort("Integer32ToSmiField writing to non-smi location");
@@ -949,180 +900,6 @@
 }
 
 
-void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    movq(kScratchRegister, src);
-    neg(dst);  // Low 32 bits are retained as zero by negation.
-    // Test if result is zero or Smi::kMinValue.
-    cmpq(dst, kScratchRegister);
-    j(not_equal, on_smi_result);
-    movq(src, kScratchRegister);
-  } else {
-    movq(dst, src);
-    neg(dst);
-    cmpq(dst, src);
-    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
-    j(not_equal, on_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible.
-    if (dst.is(src1)) {
-      addq(dst, src2);
-    } else {
-      movq(dst, src1);
-      addq(dst, src2);
-    }
-    Assert(no_overflow, "Smi addition overflow");
-  } else if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible (e.g., subtracting two positive smis).
-    if (dst.is(src1)) {
-      subq(dst, src2);
-    } else {
-      movq(dst, src1);
-      subq(dst, src2);
-    }
-    Assert(no_overflow, "Smi subtraction overflow");
-  } else if (dst.is(src1)) {
-    cmpq(dst, src2);
-    j(overflow, on_not_smi_result);
-    subq(dst, src2);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            Label* on_not_smi_result) {
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible (e.g., subtracting two positive smis).
-    if (dst.is(src1)) {
-      subq(dst, src2);
-    } else {
-      movq(dst, src1);
-      subq(dst, src2);
-    }
-    Assert(no_overflow, "Smi subtraction overflow");
-  } else if (dst.is(src1)) {
-    movq(kScratchRegister, src2);
-    cmpq(src1, kScratchRegister);
-    j(overflow, on_not_smi_result);
-    subq(src1, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-
-  if (dst.is(src1)) {
-    Label failure, zero_correct_result;
-    movq(kScratchRegister, src1);  // Create backup for later testing.
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, &failure);
-
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    Label correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-
-    movq(dst, kScratchRegister);
-    xor_(dst, src2);
-    j(positive, &zero_correct_result);  // Result was positive zero.
-
-    bind(&failure);  // Reused failure exit, restores src1.
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-
-    bind(&zero_correct_result);
-    xor_(dst, dst);
-
-    bind(&correct_result);
-  } else {
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, on_not_smi_result);
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    Label correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-    // One of src1 and src2 is zero, the check whether the other is
-    // negative.
-    movq(kScratchRegister, src1);
-    xor_(kScratchRegister, src2);
-    j(negative, on_not_smi_result);
-    bind(&correct_result);
-  }
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       Label* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src.is(kScratchRegister));
-
-  JumpIfNotSmi(src, on_not_smi_result);
-  Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  LoadSmiConstant(tmp, constant);
-  addq(tmp, src);
-  j(overflow, on_not_smi_result);
-  if (dst.is(src)) {
-    movq(dst, tmp);
-  }
-}
-
-
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1179,29 +956,6 @@
 }
 
 
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-
-    LoadSmiConstant(kScratchRegister, constant);
-    addq(kScratchRegister, src);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    LoadSmiConstant(dst, constant);
-    addq(dst, src);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1226,165 +980,48 @@
 }
 
 
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result);
-      movq(dst, kScratchRegister);
-    }
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible.
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    addq(dst, src2);
   } else {
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
-      addq(dst, src);
-      j(overflow, on_not_smi_result);
-    }
+    movq(dst, src1);
+    addq(dst, src2);
   }
+  Assert(no_overflow, "Smi addition overflow");
 }
 
 
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  Label positive_divisor;
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  // We need to rule out dividing Smi::kMinValue by -1, since that would
-  // overflow in idiv and raise an exception.
-  // We combine this with negative zero test (negative zero only happens
-  // when dividing zero by a negative number).
-
-  // We overshoot a little and go to slow case if we divide min-value
-  // by any negative value, not just -1.
-  Label safe_div;
-  testl(rax, Immediate(0x7fffffff));
-  j(not_zero, &safe_div);
-  testq(src2, src2);
-  if (src1.is(rax)) {
-    j(positive, &safe_div);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible (e.g., subtracting two positive smis).
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    subq(dst, src2);
   } else {
-    j(negative, on_not_smi_result);
+    movq(dst, src1);
+    subq(dst, src2);
   }
-  bind(&safe_div);
-
-  SmiToInteger32(src2, src2);
-  // Sign extend src1 into edx:eax.
-  cdq();
-  idivl(src2);
-  Integer32ToSmi(src2, src2);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  if (src1.is(rax)) {
-    Label smi_result;
-    j(zero, &smi_result);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  } else {
-    j(not_zero, on_not_smi_result);
-  }
-  if (!dst.is(src1) && src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  Integer32ToSmi(dst, rax);
+  Assert(no_overflow, "Smi subtraction overflow");
 }
 
 
-void MacroAssembler::SmiMod(Register dst,
+void MacroAssembler::SmiSub(Register dst,
                             Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-  ASSERT(!src1.is(src2));
-
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
+                            const Operand& src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible (e.g., subtracting two positive smis).
+  if (dst.is(src1)) {
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
   }
-  SmiToInteger32(rax, src1);
-  SmiToInteger32(src2, src2);
-
-  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
-  Label safe_div;
-  cmpl(rax, Immediate(Smi::kMinValue));
-  j(not_equal, &safe_div);
-  cmpl(src2, Immediate(-1));
-  j(not_equal, &safe_div);
-  // Retag inputs and go slow case.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  jmp(on_not_smi_result);
-  bind(&safe_div);
-
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Restore smi tags on inputs.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, go slow to return a floating point negative zero.
-  Label smi_result;
-  testl(rdx, rdx);
-  j(not_zero, &smi_result);
-  testq(src1, src1);
-  j(negative, on_not_smi_result);
-  bind(&smi_result);
-  Integer32ToSmi(dst, rdx);
+  Assert(no_overflow, "Smi subtraction overflow");
 }
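The label-free SmiAdd/SmiSub overloads above rely on the caller proving overflow impossible; only the debug-mode Assert guards against misuse. A standalone model of the checked/unchecked distinction (plain C++, not V8 internals; it assumes the x64 layout where the smi's 32-bit payload sits in the upper word, so kSmiShift == 32):

// Plain C++ model (not V8 code) of checked vs. unchecked smi addition.
// On x64 the payload lives in bits 32..63, so ordinary 64-bit addition
// adds payloads, and 64-bit signed overflow is exactly the condition
// that the checked assembly variant's j(overflow, ...) tests.
#include <cstdint>
#include <cstdio>
#include <optional>

int64_t Tag(int32_t v) { return static_cast<int64_t>(v) << 32; }
int32_t Untag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

// Checked flavor: nullopt models the jump to on_not_smi_result.
// (__builtin_add_overflow is a GCC/Clang builtin.)
std::optional<int64_t> SmiAddChecked(int64_t a, int64_t b) {
  int64_t sum;
  if (__builtin_add_overflow(a, b, &sum)) return std::nullopt;
  return sum;
}

int main() {
  printf("2 + 3 = %d\n", Untag(*SmiAddChecked(Tag(2), Tag(3))));
  printf("max + 1 overflows: %d\n",
         !SmiAddChecked(Tag(INT32_MAX), Tag(1)).has_value());
  return 0;
}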
 
 
@@ -1480,25 +1117,6 @@
 }
 
 
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
-                                                  Register src,
-                                                  int shift_value,
-                                                  Label* on_not_smi_result) {
-  // Logic right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movq(dst, src);
-    if (shift_value == 0) {
-      testq(dst, dst);
-      j(negative, on_not_smi_result);
-    }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
-  }
-}
-
-
 void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                           Register src,
                                           int shift_value) {
@@ -1515,7 +1133,7 @@
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
-  Label result_ok;
+  NearLabel result_ok;
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
@@ -1527,42 +1145,6 @@
 }
 
 
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  if (src1.is(rcx) || src2.is(rcx)) {
-    movq(kScratchRegister, rcx);
-  }
-  if (!dst.is(src1)) {
-    movq(dst, src1);
-  }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
-  testq(dst, dst);
-  if (src1.is(rcx) || src2.is(rcx)) {
-    Label positive_result;
-    j(positive, &positive_result);
-    if (src1.is(rcx)) {
-      movq(src1, kScratchRegister);
-    } else {
-      movq(src2, kScratchRegister);
-    }
-    jmp(on_not_smi_result);
-    bind(&positive_result);
-  } else {
-    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
-  }
-}
-
-
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
@@ -1590,44 +1172,6 @@
 }
 
 
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smis) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {  // Check contains a stub call.
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movl(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  // If non-zero then both are smis.
-  j(not_zero, on_not_smis);
-
-  // Exactly one operand is a smi.
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1663,136 +1207,13 @@
 }
 
 
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  Condition smi = CheckSmi(src);
-  j(smi, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
-  Condition smi = CheckSmi(src);
-  j(NegateCondition(smi), on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
-                                          Label* on_not_positive_smi) {
-  Condition positive_smi = CheckPositiveSmi(src);
-  j(NegateCondition(positive_smi), on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             Smi* constant,
-                                             Label* on_equals) {
-  SmiCompare(src, constant);
-  j(equal, on_equals);
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
-                                                Label* on_invalid) {
-  Condition is_valid = CheckUInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
-                                      Label* on_not_both_smi) {
-  Condition both_smi = CheckBothSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
-                                              Label* on_not_both_smi) {
-  Condition both_smi = CheckBothPositiveSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                                         Register second_object,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* on_fail) {
-  // Check that both objects are not smis.
-  Condition either_smi = CheckEitherSmi(first_object, second_object);
-  j(either_smi, on_fail);
-
-  // Load instance type for both strings.
-  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
-  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
-  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
-  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    Label *failure) {
-  if (!scratch.is(instance_type)) {
-    movl(scratch, instance_type);
+void MacroAssembler::Move(Register dst, Register src) {
+  if (!dst.is(src)) {
+    movq(dst, src);
   }
-
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
-  andl(scratch, Immediate(kFlatAsciiStringMask));
-  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-  j(not_equal, failure);
 }
 
 
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first_object_instance_type,
-    Register second_object_instance_type,
-    Register scratch1,
-    Register scratch2,
-    Label* on_fail) {
-  // Load instance type for both strings.
-  movq(scratch1, first_object_instance_type);
-  movq(scratch2, second_object_instance_type);
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
 
 
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
@@ -1903,7 +1324,6 @@
 
 void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  WriteRecordedPositions();
   call(code_object, rmode);
 }
 
@@ -1994,7 +1414,7 @@
 
 
 void MacroAssembler::AbortIfNotNumber(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   j(is_smi, &ok);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
@@ -2005,14 +1425,14 @@
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(NegateCondition(is_smi), "Operand is a smi");
 }
 
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(is_smi, "Operand is not a smi");
 }
@@ -2052,7 +1472,7 @@
   j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
-  Label non_instance;
+  NearLabel non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
         Immediate(1 << Map::kHasNonInstancePrototype));
   j(not_zero, &non_instance);
@@ -2068,7 +1488,7 @@
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
-  Label done;
+  NearLabel done;
   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   j(not_equal, &done);
 
@@ -2133,76 +1553,11 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
-                                    const ParameterCount& actual,
-                                    Handle<Code> code_constant,
-                                    Register code_register,
-                                    Label* done,
-                                    InvokeFlag flag) {
-  bool definitely_matches = false;
-  Label invoke;
-  if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
-    if (expected.immediate() == actual.immediate()) {
-      definitely_matches = true;
-    } else {
-      Set(rax, actual.immediate());
-      if (expected.immediate() ==
-              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
-        // Don't worry about adapting arguments for built-ins that
-        // don't want that done. Skip adaption code by making it look
-        // like we have a match between expected and actual number of
-        // arguments.
-        definitely_matches = true;
-      } else {
-        Set(rbx, expected.immediate());
-      }
-    }
-  } else {
-    if (actual.is_immediate()) {
-      // Expected is in register, actual is immediate. This is the
-      // case when we invoke function values without going through the
-      // IC mechanism.
-      cmpq(expected.reg(), Immediate(actual.immediate()));
-      j(equal, &invoke);
-      ASSERT(expected.reg().is(rbx));
-      Set(rax, actual.immediate());
-    } else if (!expected.reg().is(actual.reg())) {
-      // Both expected and actual are in (different) registers. This
-      // is the case when we invoke functions using call and apply.
-      cmpq(expected.reg(), actual.reg());
-      j(equal, &invoke);
-      ASSERT(actual.reg().is(rax));
-      ASSERT(expected.reg().is(rbx));
-    }
-  }
-
-  if (!definitely_matches) {
-    Handle<Code> adaptor =
-        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
-    if (!code_constant.is_null()) {
-      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
-      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    } else if (!code_register.is(rdx)) {
-      movq(rdx, code_register);
-    }
-
-    if (flag == CALL_FUNCTION) {
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      jmp(done);
-    } else {
-      Jump(adaptor, RelocInfo::CODE_TARGET);
-    }
-    bind(&invoke);
-  }
-}
-
-
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag) {
-  Label done;
+  NearLabel done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
   if (flag == CALL_FUNCTION) {
     call(code);
@@ -2219,7 +1574,7 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag) {
-  Label done;
+  NearLabel done;
   Register dummy = rax;
   InvokePrologue(expected, actual, code, dummy, &done, flag);
   if (flag == CALL_FUNCTION) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9f5a746..a8ffca9 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -91,10 +91,11 @@
   // Check if object is in new space. The condition cc can be equal or
   // not_equal. If it is equal, a jump is taken if the object is in new
   // space. The scratch register can be the object itself, but it is clobbered.
+  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,
-                  Label* branch);
+                  LabelType* branch);
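Templating on LabelType lets one definition serve both Label and NearLabel callers: the body is instantiated per label flavor, and call sites that know the target is close get short jump encodings for free. A plain C++ sketch of the pattern (stand-in types, not V8's):

// Plain C++ sketch (not V8 code) of the LabelType pattern: the same
// templated body serves both label flavors, selected at the call site.
#include <cstdio>

struct Label     { static const char* Encoding() { return "rel32"; } };
struct NearLabel { static const char* Encoding() { return "rel8"; } };

template <typename LabelType>
void EmitBranch(LabelType* target) {
  printf("emit jcc %s\n", LabelType::Encoding());
}

int main() {
  Label far_target;
  NearLabel near_target;
  EmitBranch(&far_target);   // emit jcc rel32
  EmitBranch(&near_target);  // emit jcc rel8
  return 0;
}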
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -215,14 +216,9 @@
 
   // Tag an integer value. The result must be known to be a valid smi value.
   // Only uses the low 32 bits of the src register. Sets the N and Z flags
-  // based on the value of the resulting integer.
+  // based on the value of the resulting smi.
   void Integer32ToSmi(Register dst, Register src);
 
-  // Tag an integer value if possible, or jump the integer value cannot be
-  // represented as a smi. Only uses the low 32 bit of the src registers.
-  // NOTICE: Destroys the dst register even if unsuccessful!
-  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
-
   // Stores an integer32 value into a memory field that already holds a smi.
   void Integer32ToSmiField(const Operand& dst, Register src);
 
@@ -300,30 +296,42 @@
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
-  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+  template <typename LabelType>
+  void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
 
   // Jump if the unsigned integer value cannot be represented by a smi.
-  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+  template <typename LabelType>
+  void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
 
   // Jump to label if the value is a tagged smi.
-  void JumpIfSmi(Register src, Label* on_smi);
+  template <typename LabelType>
+  void JumpIfSmi(Register src, LabelType* on_smi);
 
   // Jump to label if the value is not a tagged smi.
-  void JumpIfNotSmi(Register src, Label* on_not_smi);
+  template <typename LabelType>
+  void JumpIfNotSmi(Register src, LabelType* on_not_smi);
 
   // Jump to label if the value is not a positive tagged smi.
-  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+  template <typename LabelType>
+  void JumpIfNotPositiveSmi(Register src, LabelType* on_not_smi);
 
   // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  void JumpIfSmiEqualsConstant(Register src,  Smi* constant, Label* on_equals);
+  template <typename LabelType>
+  void JumpIfSmiEqualsConstant(Register src,
+                               Smi* constant,
+                               LabelType* on_equals);
 
   // Jump if either or both registers are not smi values.
-  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+  template <typename LabelType>
+  void JumpIfNotBothSmi(Register src1,
+                        Register src2,
+                        LabelType* on_not_both_smi);
 
   // Jump if either or both registers are not positive smi values.
+  template <typename LabelType>
   void JumpIfNotBothPositiveSmi(Register src1, Register src2,
-                                Label* on_not_both_smi);
+                                LabelType* on_not_both_smi);
 
   // Operations on tagged smi values.
 
@@ -333,10 +341,11 @@
   // Optimistically adds an integer constant to a supposed smi.
   // If the src is not a smi, or the result is not a smi, jump to
   // the label.
+  template <typename LabelType>
   void SmiTryAddConstant(Register dst,
                          Register src,
                          Smi* constant,
-                         Label* on_not_smi_result);
+                         LabelType* on_not_smi_result);
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result.
   // No overflow testing on the result is done.
@@ -348,10 +357,11 @@
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
+  template <typename LabelType>
   void SmiAddConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result);
+                      LabelType* on_not_smi_result);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result. No testing on the result is done. Sets the N and Z flags
@@ -360,60 +370,80 @@
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
+  template <typename LabelType>
   void SmiSubConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result);
+                      LabelType* on_not_smi_result);
 
   // Negating a smi can give a negative zero or a too-large positive value.
   // NOTICE: This operation jumps on success, not failure!
+  template <typename LabelType>
   void SmiNeg(Register dst,
               Register src,
-              Label* on_smi_result);
+              LabelType* on_smi_result);
 
   // Adds smi values and returns the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiAdd(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
+
+  void SmiAdd(Register dst,
+              Register src1,
+              Register src2);
 
   // Subtracts smi values and returns the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   void SmiSub(Register dst,
               Register src1,
+              Register src2);
+
+  template <typename LabelType>
+  void SmiSub(Register dst,
+              Register src1,
               const Operand& src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
+
+  void SmiSub(Register dst,
+              Register src1,
+              const Operand& src2);
 
   // Multiplies smi values and returns the result as a smi,
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiMul(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Divides one smi by another and returns the quotient.
   // Clobbers rax and rdx registers.
+  template <typename LabelType>
   void SmiDiv(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Divides one smi by another and returns the remainder.
   // Clobbers rax and rdx registers.
+  template <typename LabelType>
   void SmiMod(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Bitwise operations.
   void SmiNot(Register dst, Register src);
@@ -427,10 +457,11 @@
   void SmiShiftLeftConstant(Register dst,
                             Register src,
                             int shift_value);
+  template <typename LabelType>
   void SmiShiftLogicalRightConstant(Register dst,
                                   Register src,
                                   int shift_value,
-                                  Label* on_not_smi_result);
+                                  LabelType* on_not_smi_result);
   void SmiShiftArithmeticRightConstant(Register dst,
                                        Register src,
                                        int shift_value);
@@ -443,10 +474,11 @@
   // Shifts a smi value to the right, shifting in zero bits at the top, and
   // returns the unsigned interpretation of the result if that is a smi.
   // Uses and clobbers rcx, so dst may not be rcx.
+  template <typename LabelType>
   void SmiShiftLogicalRight(Register dst,
-                          Register src1,
-                          Register src2,
-                          Label* on_not_smi_result);
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result);
   // Shifts a smi value to the right, sign extending the top, and
   // returns the signed interpretation of the result. That will always
   // be a valid smi value, since it's numerically smaller than the
@@ -460,10 +492,11 @@
 
   // Select the non-smi register of two registers where exactly one is a
   // smi. If neither are smis, jump to the failure label.
+  template <typename LabelType>
   void SelectNonSmi(Register dst,
                     Register src1,
                     Register src2,
-                    Label* on_not_smis);
+                    LabelType* on_not_smis);
 
   // Converts, if necessary, a smi to a combination of number and
   // multiplier to be used as a scaled index.
@@ -493,25 +526,29 @@
 
   // ---------------------------------------------------------------------------
   // String macros.
+  template <typename LabelType>
   void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                            Register second_object,
                                            Register scratch1,
                                            Register scratch2,
-                                           Label* on_not_both_flat_ascii);
+                                           LabelType* on_not_both_flat_ascii);
 
   // Check whether the instance type represents a flat ascii string. Jump
   // to the label if not. If the instance type can be scratched, specify the
   // same register for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
-                                              Register scratch,
-                                              Label *on_not_flat_ascii_string);
+  template <typename LabelType>
+  void JumpIfInstanceTypeIsNotSequentialAscii(
+      Register instance_type,
+      Register scratch,
+      LabelType *on_not_flat_ascii_string);
 
+  template <typename LabelType>
   void JumpIfBothInstanceTypesAreNotSequentialAscii(
       Register first_object_instance_type,
       Register second_object_instance_type,
       Register scratch1,
       Register scratch2,
-      Label* on_fail);
+      LabelType* on_fail);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -520,6 +557,9 @@
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
+  // Move if the registers are not identical.
+  void Move(Register target, Register source);
+
   // Handle support
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
@@ -865,11 +905,12 @@
   Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
+  template <typename LabelType>
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       Register code_register,
-                      Label* done,
+                      LabelType* done,
                       InvokeFlag flag);
 
   // Activation support.
@@ -961,6 +1002,697 @@
 #define ACCESS_MASM(masm) masm->
 #endif
 
+// -----------------------------------------------------------------------------
+// Template implementations.
+
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
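With kSmiTagSize == 1 and kSmiShiftSize == 31 (their x64 values in this generation of V8), kSmiShift is 32: the payload occupies the upper word, and the lower word, including the tag bit, is zero. A worked standalone sketch:

// Plain C++ model (not V8 code) of x64 smi tagging, assuming
// kSmiShift == kSmiTagSize + kSmiShiftSize == 1 + 31 == 32.
#include <cstdint>
#include <cstdio>

const int kSmiShift = 32;

int64_t Integer32ToSmi(int32_t value) {
  // Every int32 fits: payload in bits 32..63, low word all zero,
  // so the tag bit (bit 0) is 0 as required.
  return static_cast<int64_t>(value) << kSmiShift;
}

int32_t SmiToInteger32(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // Arithmetic shift untags.
}

int main() {
  int64_t tagged = Integer32ToSmi(-7);
  printf("-7 tags to 0x%016llx\n", static_cast<unsigned long long>(tagged));
  printf("round trip: %d\n", SmiToInteger32(tagged));  // -7
  return 0;
}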
+
+
+template <typename LabelType>
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            LabelType* on_smi_result) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result);
+  }
+}
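The single cmpq suffices because exactly two smi inputs are fixed points of 64-bit negation: 0 (whose negation should be -0, which no smi can represent) and the tagged Smi::kMinValue (whose two's-complement negation is itself). A standalone check of that claim:

// Plain C++ check (not V8 code): over tagged smis, x == -x only for 0
// and tagged Smi::kMinValue, so comparing the negated value with the
// original catches both slow-path cases in one test.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kTaggedMin = static_cast<int64_t>(INT32_MIN) << 32;
  const int64_t samples[] = {0, kTaggedMin, int64_t{5} << 32,
                             int64_t{-9} << 32};
  for (int64_t s : samples) {
    // Negate via unsigned arithmetic so wraparound matches the neg
    // instruction instead of being signed-overflow UB.
    int64_t negated = static_cast<int64_t>(-static_cast<uint64_t>(s));
    printf("payload %11d: %s\n", static_cast<int32_t>(s >> 32),
           negated == s ? "slow path" : "smi result");
  }
  return 0;
}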
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result);
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            const Operand& src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
+    j(overflow, on_not_smi_result);
+    subq(src1, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+
+  if (dst.is(src1)) {
+    NearLabel failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    NearLabel correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    j(positive, &zero_correct_result);  // Result was positive zero.
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+
+    bind(&zero_correct_result);
+    xor_(dst, dst);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    NearLabel correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+    // One of src1 and src2 is zero; check whether the other is negative.
+    movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result);
+    bind(&correct_result);
+  }
+}
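When the 64-bit product is zero, one operand must be zero, and JavaScript requires the result -0 (which no smi can represent) exactly when the other operand is negative; XOR-ing the operands folds that into one sign test, which is what j(negative, on_not_smi_result) reads. A standalone check:

// Plain C++ check (not V8 code) of the negative-zero test: for a zero
// product, the sign bit of (a ^ b) is set iff exactly one operand is
// negative, i.e. iff the JS result would be -0.
#include <cstdint>
#include <cstdio>

bool NeedsNegativeZeroSlowPath(int64_t a, int64_t b) {
  if (a != 0 && b != 0) return false;  // Non-zero product: plain smi.
  return (a ^ b) < 0;                  // One operand zero: sign decides.
}

int main() {
  printf("0 * -5: %d\n", NeedsNegativeZeroSlowPath(0, -5));  // 1 -> -0
  printf("0 *  5: %d\n", NeedsNegativeZeroSlowPath(0, 5));   // 0 -> +0
  printf("3 *  4: %d\n", NeedsNegativeZeroSlowPath(3, 4));   // 0
  return 0;
}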
+
+
+template <typename LabelType>
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       Smi* constant,
+                                       LabelType* on_not_smi_result) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+
+  JumpIfNotSmi(src, on_not_smi_result);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  LoadSmiConstant(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    LabelType* on_not_smi_result) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
+  } else {
+    LoadSmiConstant(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    LabelType* on_not_smi_result) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result);
+      movq(dst, kScratchRegister);
+    }
+  } else {
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result, it only
+      // differs on the overflow bit, which we don't check here.
+      addq(dst, src);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result);
+    }
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  NearLabel positive_divisor;
+  testq(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
+
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  NearLabel safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+  } else {
+    j(negative, on_not_smi_result);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
+  idivl(src2);
+  Integer32ToSmi(src2, src2);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  if (src1.is(rax)) {
+    NearLabel smi_result;
+    j(zero, &smi_result);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  Integer32ToSmi(dst, rax);
+}
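The testl(rax, Immediate(0x7fffffff)) filter is deliberately coarse: after untagging, only the dividends 0 and INT32_MIN have all of their low 31 bits clear, and those are precisely the values that can overflow idiv (kMinValue / -1) or require -0 (0 divided by a negative number). A standalone check:

// Plain C++ check (not V8 code) of the overshoot filter: the two
// dangerous dividends, 0 and INT32_MIN, are the only 32-bit values
// whose low 31 bits are all zero.
#include <cstdint>
#include <cstdio>

bool MaybeSlow(int32_t dividend) {
  return (dividend & 0x7fffffff) == 0;
}

int main() {
  printf("0:         %d\n", MaybeSlow(0));          // 1: 0 / -n is -0
  printf("INT32_MIN: %d\n", MaybeSlow(INT32_MIN));  // 1: / -1 overflows
  printf("1:         %d\n", MaybeSlow(1));          // 0
  printf("-12:       %d\n", MaybeSlow(-12));        // 0
  return 0;
}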
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
+
+  testq(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  NearLabel safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result);
+  bind(&safe_div);
+
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  NearLabel smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result);
+  testq(src1, src1);
+  j(negative, on_not_smi_result);
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRightConstant(
+    Register dst,
+    Register src,
+    int shift_value,
+    LabelType* on_not_smi_result) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
+      j(negative, on_not_smi_result);
+    }
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  NearLabel result_ok;
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Effective shift is (rcx & 0x1f) + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    NearLabel positive_result;
+    j(positive, &positive_result);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result);
+    bind(&positive_result);
+  } else {
+    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  LabelType* on_not_smis) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  // If non-zero then both are smis.
+  j(not_zero, on_not_smis);
+
+  // Exactly one operand is a smi.
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
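The branch-free select works because, with a zero smi tag, (src1 & 1) - 1 is all ones when src1 is a smi and all zeros when it is not, turning the classic xor-select idiom ((a ^ b) & mask) ^ a into "pick the other operand iff src1 is the smi". A standalone check:

// Plain C++ check (not V8 code) of the xor-select: with tag bit 0 for
// smis, mask = (src1 & 1) - 1 is ~0 when src1 is a smi and 0 otherwise,
// so ((src1 ^ src2) & mask) ^ src1 yields the non-smi operand.
#include <cstdint>
#include <cstdio>

uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
  uint64_t mask = (src1 & 1) - 1;
  return ((src1 ^ src2) & mask) ^ src1;
}

int main() {
  uint64_t smi = uint64_t{42} << 32;  // Tag bit clear: a smi.
  uint64_t heap = 0x12345679;         // Tag bit set: heap-pointer-like.
  printf("0x%llx\n",
         static_cast<unsigned long long>(SelectNonSmi(smi, heap)));
  printf("0x%llx\n",
         static_cast<unsigned long long>(SelectNonSmi(heap, smi)));
  // Both lines print 0x12345679, the non-smi operand.
  return 0;
}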
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          LabelType* on_not_positive_smi) {
+  Condition positive_smi = CheckPositiveSmi(src);
+  j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             LabelType* on_equals) {
+  SmiCompare(src, constant);
+  j(equal, on_equals);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+                                            LabelType* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                LabelType* on_invalid) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      LabelType* on_not_both_smi) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1,
+                                              Register src2,
+                                              LabelType* on_not_both_smi) {
+  Condition both_smi = CheckBothPositiveSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+                                                         Register second_object,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         LabelType* on_fail) {
+  // Check that both objects are not smis.
+  Condition either_smi = CheckEitherSmi(first_object, second_object);
+  j(either_smi, on_fail);
+
+  // Load instance type for both strings.
+  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    LabelType* failure) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
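+  // After masking, a sequential ASCII string leaves exactly the string,
+  // sequential and ASCII encoding tag bits checked below.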
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    LabelType* on_fail) {
+  // Copy the already-loaded instance types into the scratch registers.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                LabelType* branch) {
+  if (Serializer::enabled()) {
+    // We can't do arithmetic on an external reference if the code might
+    // get serialized.  The mask isn't really an address; we load it as an
+    // external reference in case the size of the new space differs between
+    // the snapshot maker and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask());
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask());
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start());
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
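+    // object + (-new_space_start), masked with the new space mask, is zero
+    // exactly when the object lies in new space; cc picks the branch sense
+    // the caller asked for.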
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+    j(cc, branch);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    LabelType* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  NearLabel invoke;
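+  // Register protocol for argument adaptation: rax holds the actual
+  // argument count, rbx the expected count, and rdx the code to invoke.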
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      Set(rax, actual.immediate());
+      if (expected.immediate() ==
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done.  Skip the adaptation code by making it
+        // look like we have a match between the expected and actual
+        // number of arguments.
+        definitely_matches = true;
+      } else {
+        Set(rbx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(rbx));
+      Set(rax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
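+    // For CALL_FUNCTION we call the adaptor and then jump over the body to
+    // 'done'; otherwise we tail-call the adaptor and never return here.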
+    if (flag == CALL_FUNCTION) {
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      jmp(done);
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
+  }
+}
+
 
 } }  // namespace v8::internal
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 765a90c..68b18a2 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -216,7 +216,12 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check that we are still in the same context; a stub compiled for one
+  // global context must not be reused from another one.
+  __ Move(prototype, Top::global());
+  __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+          prototype);
+  __ j(not_equal, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -964,7 +969,7 @@
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, rax);
+            masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -983,7 +988,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, rax);
+            masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -1004,7 +1009,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -1358,7 +1363,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            rax);
+                                            rax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                   rbx, rdx, rdi, name, &miss);
@@ -1429,7 +1435,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            rax);
+                                            rax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                   rbx, rdx, rdi, name, &miss);
@@ -1541,6 +1548,16 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // TODO(872): implement this.
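+  // Returning undefined presumably signals that no specialized stub was
+  // generated, so the generic call path is used instead.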
+  return Heap::undefined_value();
+}
+
+
 Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
@@ -1684,7 +1701,10 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, rdi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
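+    // Detach the label before the early return; a Label that is still
+    // linked when it is destroyed would trip an assert.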
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1740,7 +1760,10 @@
                                              name,
                                              rdx,
                                              &miss);
-    if (cell->IsFailure()) return cell;
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -1845,12 +1868,12 @@
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ movq(rax, rbx);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
@@ -1878,7 +1901,10 @@
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }
 
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 88e7cc8..e88a993 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -259,7 +259,7 @@
 
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL) {
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     if (slot->type() == Slot::LOCAL) {
       PushLocalAt(slot->index());
       return;