Version 2.4.5

Changed the RegExp benchmark to exercise the regexp engine on different inputs by scrambling the input strings.
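
A deterministic scramble is enough here: a fixed seed keeps scores comparable across runs while still defeating caches and position-dependent fast paths in the regexp engine. A minimal sketch of the idea in C++ (the benchmark itself lives in the JavaScript suite; the function name and seed below are illustrative):

    #include <algorithm>
    #include <random>
    #include <string>
    #include <vector>

    // Shuffle each input string with a fixed seed so that every run sees
    // the same "scrambled" data: deterministic scores, varied inputs.
    void ScrambleInputs(std::vector<std::string>* inputs) {
      std::mt19937 rng(42);  // Fixed seed keeps the benchmark reproducible.
      for (std::string& s : *inputs) {
        std::shuffle(s.begin(), s.end(), rng);
      }
    }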

Fixed a bug in keyed loads on strings.

Fixed a bug with loading global function prototypes.

Fixed a bug with profiling RegExp calls (issue http://crbug.com/55999).

Performance improvements on all platforms.



git-svn-id: http://v8.googlecode.com/svn/trunk@5502 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index e09d4c9..d6ed8ae 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4433,7 +4433,7 @@
 
 unsigned CpuProfileNode::GetCallUid() const {
   IsDeadCheck("v8::CpuProfileNode::GetCallUid");
-  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid();
+  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
 }
 
 
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 4743439..b0c0990 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -37,17 +37,8 @@
 namespace internal {
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
-  Address sp = fp + ExitFrameConstants::kSPOffset;
-
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return fp + ExitFrameConstants::kSPOffset;
 }
 
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index c776d67..f5d1217 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -620,7 +620,7 @@
       __ pop(r2);  // Receiver.
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       // Value in r0 is ignored (declarations are statements).
     }
   }
@@ -956,7 +956,7 @@
                                                    slow));
           __ mov(r0, Operand(key_literal->handle()));
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ Call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1022,7 +1022,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
 }
 
 
@@ -1041,7 +1041,7 @@
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
     Apply(context, r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1100,7 +1100,7 @@
 
     // Call keyed load IC. It has arguments key and receiver in r0 and r1.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     Apply(context, r0);
   }
 }
@@ -1189,7 +1189,7 @@
           __ mov(r2, Operand(key->handle()));
           __ ldr(r1, MemOperand(sp));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ Call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
@@ -1409,7 +1409,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1417,7 +1417,7 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1475,7 +1475,7 @@
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1486,7 +1486,7 @@
       __ pop(r2);
       __ pop(r0);  // Restore value.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1509,7 +1509,7 @@
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
@@ -1598,7 +1598,7 @@
   }
 
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1642,7 +1642,7 @@
   }
 
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1691,7 +1691,7 @@
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   Apply(context_, r0);
@@ -1715,7 +1715,7 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
                                                               in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   Apply(context_, r0);
@@ -1854,7 +1854,7 @@
         __ pop(r1);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ Call(ic, RelocInfo::CODE_TARGET);
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         __ ldr(r1, CodeGenerator::GlobalObject());
         __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
         __ Push(r0, r1);  // Function, receiver.
@@ -2769,7 +2769,7 @@
     __ mov(r2, Operand(expr->name()));
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                            NOT_IN_LOOP);
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3065,7 +3065,7 @@
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
@@ -3079,7 +3079,7 @@
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
@@ -3102,7 +3102,7 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     if (where == kStack) __ push(r0);
   } else if (proxy != NULL &&
              proxy->var()->slot() != NULL &&
@@ -3365,10 +3365,21 @@
 }
 
 
-Register FullCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() {
+  return r0;
+}
 
 
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+  return cp;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ Call(ic, mode);
+}
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 1a76db2..d5a700c 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -967,6 +967,14 @@
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(<bug#>): implement this.
+  return false;
+}
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
@@ -1236,7 +1244,6 @@
   //  -- r1     : receiver
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = r1;
   Register index = r0;
@@ -1251,7 +1258,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1259,10 +1266,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ Ret();
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 0da5f64..070e352 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -266,7 +266,12 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ Move(ip, Top::global());
+  __ cmp(prototype, ip);
+  __ b(ne, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -1434,7 +1439,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r0);
+                                            r0,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
@@ -1505,7 +1511,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r0);
+                                            r0,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
@@ -1626,6 +1633,16 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // TODO(872): implement this.
+  return Heap::undefined_value();
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -1705,7 +1722,7 @@
         __ b(hs, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, r0);
+            masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1725,7 +1742,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, r0);
+            masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1748,7 +1765,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -2212,11 +2229,11 @@
   }
 
   __ mov(r0, r4);
-  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 6e6c2c6..1f12502 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1344,33 +1344,41 @@
 }
 
 
-static void InstallCustomCallGenerator(
-    Handle<JSFunction> holder_function,
-    CallStubCompiler::CustomGeneratorOwner owner_flag,
-    const char* function_name,
-    int id) {
-  Handle<JSObject> owner;
-  if (owner_flag == CallStubCompiler::FUNCTION) {
-    owner = Handle<JSObject>::cast(holder_function);
-  } else {
-    ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
-    owner = Handle<JSObject>(
-        JSObject::cast(holder_function->instance_prototype()));
+static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+    Handle<Context> global_context,
+    const char* holder_expr) {
+  Handle<GlobalObject> global(global_context->global());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
   }
+  ASSERT_EQ(".prototype", period_pos);
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<JSFunction> function = Handle<JSFunction>::cast(
+      GetProperty(global, Factory::LookupSymbol(property)));
+  return Handle<JSObject>(JSObject::cast(function->prototype()));
+}
+
+
+static void InstallCustomCallGenerator(Handle<JSObject> holder,
+                                       const char* function_name,
+                                       int id) {
   Handle<String> name = Factory::LookupAsciiSymbol(function_name);
-  Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
+  Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
   function->shared()->set_function_data(Smi::FromInt(id));
 }
 
 
 void Genesis::InstallCustomCallGenerators() {
   HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name)    \
-  {                                                                       \
-    Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
-    const int id = CallStubCompiler::k##name##CallGenerator;              \
-    InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag,      \
-                               #fun_name, id);                            \
+#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name)     \
+  {                                                             \
+    Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
+        global_context(), #holder_expr);                        \
+    const int id = CallStubCompiler::k##name##CallGenerator;    \
+    InstallCustomCallGenerator(holder, #fun_name, id);          \
   }
   CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
 #undef INSTALL_CALL_GENERATOR
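
The holder of a custom call generator is now named by a string expression instead of a function/flag pair. A stand-alone sketch (not V8 code) of the convention ResolveCustomCallGeneratorHolder parses: a holder expression is either a plain global name ("Math") or a function name followed by exactly ".prototype" ("String.prototype"):

    #include <cassert>
    #include <cstring>
    #include <string>

    struct Holder {
      std::string object_name;  // "Math" or "String"
      bool on_prototype;        // true for "String.prototype"
    };

    Holder ParseHolderExpr(const char* holder_expr) {
      const char* period_pos = strchr(holder_expr, '.');
      if (period_pos == NULL) {
        return Holder{holder_expr, false};  // e.g. "Math"
      }
      // Everything after the first '.' must be exactly ".prototype"; the
      // patch's ASSERT_EQ is intended as the same by-content comparison.
      assert(strcmp(period_pos, ".prototype") == 0);
      return Holder{std::string(holder_expr, period_pos - holder_expr), true};
    }
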
diff --git a/src/conversions.cc b/src/conversions.cc
index 90cdc77..f15a804 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -956,8 +956,9 @@
 
 
 char* DoubleToExponentialCString(double value, int f) {
+  const int kMaxDigitsAfterPoint = 20;
   // f might be -1 to signal that f was undefined in JavaScript.
-  ASSERT(f >= -1 && f <= 20);
+  ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
 
   bool negative = false;
   if (value < 0) {
@@ -969,29 +970,60 @@
   int decimal_point;
   int sign;
   char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // f corresponds to the digits after the point. There is always one digit
+  // before the point, hence the number of requested_digits equals f + 1.
+  // And we have to add one character for the null-terminator.
+  const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
+  // Make sure that the buffer is big enough, even if we fall back to the
+  // shortest representation (which happens when f equals -1).
+  ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
   if (f == -1) {
-    decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
-    f = StrLength(decimal_rep) - 1;
+    if (DoubleToAscii(value, DTOA_SHORTEST, 0,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      f = decimal_rep_length - 1;
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      f = decimal_rep_length - 1;
+      used_gay_dtoa = true;
+    }
   } else {
-    decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+    if (DoubleToAscii(value, DTOA_PRECISION, f + 1,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      used_gay_dtoa = true;
+    }
   }
-  int decimal_rep_length = StrLength(decimal_rep);
   ASSERT(decimal_rep_length > 0);
   ASSERT(decimal_rep_length <= f + 1);
-  USE(decimal_rep_length);
 
   int exponent = decimal_point - 1;
   char* result =
       CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
 
-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }
 
   return result;
 }
 
 
 char* DoubleToPrecisionCString(double value, int p) {
-  ASSERT(p >= 1 && p <= 21);
+  const int kMinimalDigits = 1;
+  const int kMaximalDigits = 21;
+  ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
+  USE(kMinimalDigits);
 
   bool negative = false;
   if (value < 0) {
@@ -1002,8 +1034,22 @@
   // Find a sufficiently precise decimal representation of n.
   int decimal_point;
   int sign;
-  char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
-  int decimal_rep_length = StrLength(decimal_rep);
+  char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // Add one for the terminating null character.
+  const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
+  if (DoubleToAscii(value, DTOA_PRECISION, p,
+                    Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                    &sign, &decimal_rep_length, &decimal_point)) {
+    decimal_rep = v8_dtoa_buffer;
+  } else {
+    decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+    decimal_rep_length = StrLength(decimal_rep);
+    used_gay_dtoa = true;
+  }
   ASSERT(decimal_rep_length <= p);
 
   int exponent = decimal_point - 1;
@@ -1047,7 +1093,9 @@
     result = builder.Finalize();
   }
 
-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }
   return result;
 }
 
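Both conversion functions above now share one shape: try the new fast dtoa into a fixed-size stack buffer, and fall back to Gay's heap-allocating dtoa only when the fast path reports failure, remembering which path produced the string so that freedtoa() runs only when it must. The pattern in miniature, with stand-in converters (FastConvert and SlowConvertAlloc are hypothetical, not V8 functions):

    #include <cstdio>
    #include <cstdlib>

    static bool FastConvert(double v, char* buf, int cap) {
      return snprintf(buf, cap, "%.17g", v) < cap;  // false if truncated
    }
    static char* SlowConvertAlloc(double v) {  // caller must free()
      char* p = static_cast<char*>(malloc(64));
      snprintf(p, 64, "%.17g", v);
      return p;
    }

    void PrintDouble(double v) {
      char stack_buf[32];
      char* rep = stack_buf;
      bool used_slow = false;  // mirrors used_gay_dtoa in the patch
      if (!FastConvert(v, stack_buf, static_cast<int>(sizeof(stack_buf)))) {
        rep = SlowConvertAlloc(v);
        used_slow = true;
      }
      puts(rep);
      if (used_slow) free(rep);  // only the slow path allocates
    }
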
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index cb7fdd8..5df5893 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -82,14 +82,11 @@
 
 bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
     Logger::LogEventsAndTags tag) {
-  // In browser mode, leave only callbacks and non-native JS entries.
-  // We filter out regular expressions as currently we can't tell
-  // whether they origin from native scripts, so let's not confise people by
-  // showing them weird regexes they didn't wrote.
   return FLAG_prof_browser_mode
       && (tag != Logger::CALLBACK_TAG
           && tag != Logger::FUNCTION_TAG
           && tag != Logger::LAZY_COMPILE_TAG
+          && tag != Logger::REG_EXP_TAG
           && tag != Logger::SCRIPT_TAG);
 }
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 0b02e21..34eb0f0 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -45,7 +45,7 @@
                      ScriptCollected: 6 };
 
 // Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { All : 0,
+Debug.ExceptionBreak = { Caught : 0,
                          Uncaught: 1 };
 
 // The different types of steps.
@@ -87,7 +87,27 @@
       this.value = !!value;
       %SetDisableBreak(!this.value);
     }
-  }
+  },
+  breakOnCaughtException: {
+    getValue: function() { return Debug.isBreakOnException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnException();
+      } else {
+        Debug.clearBreakOnException();
+      }
+    }
+  },
+  breakOnUncaughtException: {
+    getValue: function() { return Debug.isBreakOnUncaughtException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnUncaughtException();
+      } else {
+        Debug.clearBreakOnUncaughtException();
+      }
+    }
+  },
 };
 
 
@@ -781,11 +801,15 @@
 }
 
 Debug.setBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
 };
 
 Debug.clearBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
+};
+
+Debug.isBreakOnException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
 };
 
 Debug.setBreakOnUncaughtException = function() {
@@ -796,6 +820,10 @@
   return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
 };
 
+Debug.isBreakOnUncaughtException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
+};
+
 Debug.showBreakPoints = function(f, full) {
   if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
   var source = full ? this.scriptSource(f) : this.source(f);
diff --git a/src/debug.cc b/src/debug.cc
index 87780d3..24b1d31 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1200,6 +1200,15 @@
 }
 
 
+bool Debug::IsBreakOnException(ExceptionBreakType type) {
+  if (type == BreakUncaughtException) {
+    return break_on_uncaught_exception_;
+  } else {
+    return break_on_exception_;
+  }
+}
+
+
 void Debug::PrepareStep(StepAction step_action, int step_count) {
   HandleScope scope;
   ASSERT(Debug::InDebugger());
diff --git a/src/debug.h b/src/debug.h
index 8b3b29e..0d63085 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -236,6 +236,7 @@
   static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
   static void FloodHandlerWithOneShot();
   static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+  static bool IsBreakOnException(ExceptionBreakType type);
   static void PrepareStep(StepAction step_action, int step_count);
   static void ClearStepping();
   static bool StepNextContinue(BreakLocationIterator* break_location_iterator,
diff --git a/src/dtoa.cc b/src/dtoa.cc
index e3dcbf2..f4141eb 100644
--- a/src/dtoa.cc
+++ b/src/dtoa.cc
@@ -65,11 +65,12 @@
 
   switch (mode) {
     case DTOA_SHORTEST:
-      return FastDtoa(v, buffer, length, point);
+      return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
     case DTOA_FIXED:
       return FastFixedDtoa(v, requested_digits, buffer, length, point);
-    default:
-      break;
+    case DTOA_PRECISION:
+      return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+                      buffer, length, point);
   }
   return false;
 }
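
Replacing the default clause with an explicit DTOA_PRECISION case looks deliberate: once every enumerator is handled, compilers can warn (GCC/Clang -Wswitch) if a future mode is added but not dispatched. The pattern in isolation:

    enum DtoaMode { DTOA_SHORTEST, DTOA_FIXED, DTOA_PRECISION };

    const char* ModeName(DtoaMode mode) {
      switch (mode) {
        case DTOA_SHORTEST:  return "shortest";
        case DTOA_FIXED:     return "fixed";
        case DTOA_PRECISION: return "precision";
        // No default: adding a fourth DtoaMode makes the compiler flag
        // this switch as incomplete instead of silently falling through.
      }
      return "unknown";  // unreachable for valid modes
    }
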
diff --git a/src/fast-dtoa.cc b/src/fast-dtoa.cc
index b4b7be0..d2a00cc 100644
--- a/src/fast-dtoa.cc
+++ b/src/fast-dtoa.cc
@@ -42,8 +42,8 @@
 //
 // A different range might be chosen on a different platform, to optimize digit
 // generation, but a smaller range requires more powers of ten to be cached.
-static const int minimal_target_exponent = -60;
-static const int maximal_target_exponent = -32;
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
 
 
 // Adjusts the last digit of the generated number, and screens out generated
@@ -61,13 +61,13 @@
 // Output: returns true if the buffer is guaranteed to contain the closest
 //    representable number to the input.
 //  Modifies the generated digits in the buffer to approach (round towards) w.
-bool RoundWeed(Vector<char> buffer,
-               int length,
-               uint64_t distance_too_high_w,
-               uint64_t unsafe_interval,
-               uint64_t rest,
-               uint64_t ten_kappa,
-               uint64_t unit) {
+static bool RoundWeed(Vector<char> buffer,
+                      int length,
+                      uint64_t distance_too_high_w,
+                      uint64_t unsafe_interval,
+                      uint64_t rest,
+                      uint64_t ten_kappa,
+                      uint64_t unit) {
   uint64_t small_distance = distance_too_high_w - unit;
   uint64_t big_distance = distance_too_high_w + unit;
   // Let w_low  = too_high - big_distance, and
@@ -75,7 +75,7 @@
   // Note: w_low < w < w_high
   //
   // The real w (* unit) must lie somewhere inside the interval
-  // ]w_low; w_low[ (often written as "(w_low; w_low)")
+  // ]w_low; w_high[ (often written as "(w_low; w_high)")
 
   // Basically the buffer currently contains a number in the unsafe interval
   // ]too_low; too_high[ with too_low < w < too_high
@@ -122,10 +122,10 @@
   // inside the safe interval then we simply do not know and bail out (returning
   // false).
   //
-  // Similarly we have to take into account the imprecision of 'w' when rounding
-  // the buffer. If we have two potential representations we need to make sure
-  // that the chosen one is closer to w_low and w_high since v can be anywhere
-  // between them.
+  // Similarly we have to take into account the imprecision of 'w' when finding
+  // the closest representation of 'w'. If we have two potential
+  // representations, and one is closer to both w_low and w_high, then we know
+  // it is closer to the actual value v.
   //
   // By generating the digits of too_high we got the largest (closest to
   // too_high) buffer that is still in the unsafe interval. In the case where
@@ -139,6 +139,9 @@
   //              (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
   // Instead of using the buffer directly we use its distance to too_high.
   // Conceptually rest ~= too_high - buffer
+  // We need to do the following tests in this order to avoid over- and
+  // underflows.
+  ASSERT(rest <= unsafe_interval);
   while (rest < small_distance &&  // Negated condition 1
          unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
          (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
@@ -166,6 +169,62 @@
 }
 
 
+// Rounds the buffer upwards (by adding 1 to its last digit) if that brings
+// the result closer to v. If the precision of the calculation is not
+// sufficient to round correctly, returns false.
+// The rounding might shift the whole buffer in which case the kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false, if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+                             int length,
+                             uint64_t rest,
+                             uint64_t ten_kappa,
+                             uint64_t unit,
+                             int* kappa) {
+  ASSERT(rest < ten_kappa);
+  // The following tests are done in a specific order to avoid overflows. They
+  // will work correctly with any uint64 values of rest < ten_kappa and unit.
+  //
+  // If the unit is too big, then we don't know which way to round. For example
+  // a unit of 50 means that the real number lies within rest +/- 50. If
+  // 10^kappa == 40 then there is no way to tell which way to round.
+  if (unit >= ten_kappa) return false;
+  // Even if unit is just half the size of 10^kappa we are already completely
+  // lost. (And after the previous test we know that the expression will not
+  // over/underflow.)
+  if (ten_kappa - unit <= unit) return false;
+  // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+  if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+    return true;
+  }
+  // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+  if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+    // Increment the last digit, propagating the carry past any '9' digits.
+    buffer[length - 1]++;
+    for (int i = length - 1; i > 0; --i) {
+      if (buffer[i] != '0' + 10) break;
+      buffer[i] = '0';
+      buffer[i - 1]++;
+    }
+    // If the first digit is now '0' + 10 we had a buffer with all '9's. With the
+    // exception of the first digit all digits are now '0'. Simply switch the
+    // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+    // the power (the kappa) is increased.
+    if (buffer[0] == '0' + 10) {
+      buffer[0] = '1';
+      (*kappa) += 1;
+    }
+    return true;
+  }
+  return false;
+}
+
 
 static const uint32_t kTen4 = 10000;
 static const uint32_t kTen5 = 100000;
@@ -178,7 +237,7 @@
 // number. We furthermore receive the maximum number of bits 'number' has.
 // If number_bits == 0 then 0^-1 is returned
 // The number of bits must be <= 32.
-// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)).
+// Precondition: number < (1 << (number_bits + 1)).
 static void BiggestPowerTen(uint32_t number,
                             int number_bits,
                             uint32_t* power,
@@ -281,18 +340,18 @@
 
 // Generates the digits of input number w.
 // w is a floating-point number (DiyFp), consisting of a significand and an
-// exponent. Its exponent is bounded by minimal_target_exponent and
-// maximal_target_exponent.
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
 //       Hence -60 <= w.e() <= -32.
 //
 // Returns false if it fails, in which case the generated digits in the buffer
 // should not be used.
 // Preconditions:
 //  * low, w and high are correct up to 1 ulp (unit in the last place). That
-//    is, their error must be less that a unit of their last digits.
+//    is, their error must be less than a unit of their last digits.
 //  * low.e() == w.e() == high.e()
 //  * low < w < high, and taking into account their error: low~ <= high~
-//  * minimal_target_exponent <= w.e() <= maximal_target_exponent
+//  * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
 // Postconditions: returns false if procedure fails.
 //   otherwise:
 //     * buffer is not null-terminated, but len contains the number of digits.
@@ -321,15 +380,15 @@
 // represent 'w' we can stop. Everything inside the interval low - high
 // represents w. However we have to pay attention to low, high and w's
 // imprecision.
-bool DigitGen(DiyFp low,
-              DiyFp w,
-              DiyFp high,
-              Vector<char> buffer,
-              int* length,
-              int* kappa) {
+static bool DigitGen(DiyFp low,
+                     DiyFp w,
+                     DiyFp high,
+                     Vector<char> buffer,
+                     int* length,
+                     int* kappa) {
   ASSERT(low.e() == w.e() && w.e() == high.e());
   ASSERT(low.f() + 1 <= high.f() - 1);
-  ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
   // low, w and high are imprecise, but by less than one ulp (unit in the last
   // place).
   // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@@ -359,23 +418,23 @@
   uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
   // Modulo by one is an and.
   uint64_t fractionals = too_high.f() & (one.f() - 1);
-  uint32_t divider;
-  int divider_exponent;
+  uint32_t divisor;
+  int divisor_exponent;
   BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
-                  &divider, &divider_exponent);
-  *kappa = divider_exponent + 1;
+                  &divisor, &divisor_exponent);
+  *kappa = divisor_exponent + 1;
   *length = 0;
   // Loop invariant: buffer = too_high / 10^kappa  (integer division)
   // The invariant holds for the first iteration: kappa has been initialized
-  // with the divider exponent + 1. And the divider is the biggest power of ten
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
   // that is smaller than integrals.
   while (*kappa > 0) {
-    int digit = integrals / divider;
+    int digit = integrals / divisor;
     buffer[*length] = '0' + digit;
     (*length)++;
-    integrals %= divider;
+    integrals %= divisor;
     (*kappa)--;
-    // Note that kappa now equals the exponent of the divider and that the
+    // Note that kappa now equals the exponent of the divisor and that the
     // invariant thus holds again.
     uint64_t rest =
         (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
@@ -386,32 +445,24 @@
       // that lies within the unsafe interval.
       return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
                        unsafe_interval.f(), rest,
-                       static_cast<uint64_t>(divider) << -one.e(), unit);
+                       static_cast<uint64_t>(divisor) << -one.e(), unit);
     }
-    divider /= 10;
+    divisor /= 10;
   }
 
   // The integrals have been generated. We are at the point of the decimal
   // separator. In the following loop we simply multiply the remaining digits by
   // 10 and divide by one. We just need to pay attention to multiply associated
   // data (like the interval or 'unit'), too.
-  // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
-  // increase its (imaginary) exponent. At the same time we decrease the
-  // divider's (one's) exponent and shift its significand.
-  // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
-  //      fractionals.f *= 10;
-  //      fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
-  //      one.f >>= 1; one.e++;                 // value remains unchanged.
-  //      and we have again fractionals.e == one.e which allows us to divide
-  //           fractionals.f() by one.f()
-  // We simply combine the *= 10 and the >>= 1.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
   while (true) {
-    fractionals *= 5;
-    unit *= 5;
-    unsafe_interval.set_f(unsafe_interval.f() * 5);
-    unsafe_interval.set_e(unsafe_interval.e() + 1);  // Will be optimized out.
-    one.set_f(one.f() >> 1);
-    one.set_e(one.e() + 1);
+    fractionals *= 10;
+    unit *= 10;
+    unsafe_interval.set_f(unsafe_interval.f() * 10);
     // Integer division by one.
     int digit = static_cast<int>(fractionals >> -one.e());
     buffer[*length] = '0' + digit;
@@ -426,6 +477,113 @@
 }
 
 
+
+// Generates (at most) requested_digits digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+//       Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+//  * w is correct up to 1 ulp (unit in the last place). That
+//    is, its error must be strictly less than a unit of its last digit.
+//  * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+//   otherwise:
+//     * buffer is not null-terminated, but length contains the number of
+//       digits.
+//     * the representation in buffer is the most precise representation of
+//       requested_digits digits.
+//     * buffer contains at most requested_digits digits of w. If there are
+//       fewer than requested_digits digits then some trailing '0's were removed.
+//     * kappa is such that
+//            w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+//   numbers. If the precision is not enough to guarantee all the postconditions
+//   then false is returned. This usually happens rarely, but the failure-rate
+//   increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+                            int requested_digits,
+                            Vector<char> buffer,
+                            int* length,
+                            int* kappa) {
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  ASSERT(kMinimalTargetExponent >= -60);
+  ASSERT(kMaximalTargetExponent <= -32);
+  // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+  // also scale its error.
+  uint64_t w_error = 1;
+  // We cut the input number into two parts: the integral digits and the
+  // fractional digits. We don't emit any decimal separator, but adapt kappa
+  // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+  // increase kappa by 1.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+  // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+  // Modulo by one is an and.
+  uint64_t fractionals = w.f() & (one.f() - 1);
+  uint32_t divisor;
+  int divisor_exponent;
+  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divisor, &divisor_exponent);
+  *kappa = divisor_exponent + 1;
+  *length = 0;
+
+  // Loop invariant: buffer = w / 10^kappa  (integer division)
+  // The invariant holds for the first iteration: kappa has been initialized
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
+  // that is smaller than 'integrals'.
+  while (*kappa > 0) {
+    int digit = integrals / divisor;
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    integrals %= divisor;
+    (*kappa)--;
+    // Note that kappa now equals the exponent of the divisor and that the
+    // invariant thus holds again.
+    if (requested_digits == 0) break;
+    divisor /= 10;
+  }
+
+  if (requested_digits == 0) {
+    uint64_t rest =
+        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+    return RoundWeedCounted(buffer, *length, rest,
+                            static_cast<uint64_t>(divisor) << -one.e(), w_error,
+                            kappa);
+  }
+
+  // The integrals have been generated. We are at the point of the decimal
+  // separator. In the following loop we simply multiply the remaining digits by
+  // 10 and divide by one. We just need to pay attention to multiply associated
+  // data (the 'unit'), too.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  while (requested_digits > 0 && fractionals > w_error) {
+    fractionals *= 10;
+    w_error *= 10;
+    // Integer division by one.
+    int digit = static_cast<int>(fractionals >> -one.e());
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    fractionals &= one.f() - 1;  // Modulo by one.
+    (*kappa)--;
+  }
+  if (requested_digits != 0) return false;
+  return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+                          kappa);
+}
+
+
 // Provides a decimal representation of v.
 // Returns true if it succeeds, otherwise the result cannot be trusted.
 // There will be *length digits inside the buffer (not null-terminated).
@@ -437,7 +595,10 @@
 // The last digit will be closest to the actual v. That is, even if several
 // digits might correctly yield 'v' when read again, the closest will be
 // computed.
-bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
+static bool Grisu3(double v,
+                   Vector<char> buffer,
+                   int* length,
+                   int* decimal_exponent) {
   DiyFp w = Double(v).AsNormalizedDiyFp();
   // boundary_minus and boundary_plus are the boundaries between v and its
   // closest floating-point neighbors. Any number strictly between
@@ -448,12 +609,12 @@
   ASSERT(boundary_plus.e() == w.e());
   DiyFp ten_mk;  // Cached power of ten: 10^-k
   int mk;        // -k
-  GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
-                 maximal_target_exponent, &mk, &ten_mk);
-  ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
-         DiyFp::kSignificandSize &&
-         maximal_target_exponent >= w.e() + ten_mk.e() +
-         DiyFp::kSignificandSize);
+  GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+                 kMaximalTargetExponent, &mk, &ten_mk);
+  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize) &&
+         (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize));
   // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
   // 64 bit significand and ten_mk is thus only precise up to 64 bits.
 
@@ -488,17 +649,75 @@
 }
 
 
+// The "counted" version of grisu3 (see above) only generates requested_digits
+// number of digits. This version does not generate the shortest representation,
+// and with enough requested digits 0.1 will at some point print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+                          int requested_digits,
+                          Vector<char> buffer,
+                          int* length,
+                          int* decimal_exponent) {
+  DiyFp w = Double(v).AsNormalizedDiyFp();
+  DiyFp ten_mk;  // Cached power of ten: 10^-k
+  int mk;        // -k
+  GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+                 kMaximalTargetExponent, &mk, &ten_mk);
+  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize) &&
+         (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+          DiyFp::kSignificandSize));
+  // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+  // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+  // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+  // off by a small amount.
+  // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+  // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+  //           (f-1) * 2^e < w*10^k < (f+1) * 2^e
+  DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+  // We now have (double) (scaled_w * 10^-mk).
+  // DigitGenCounted will generate the first requested_digits digits of
+  // scaled_w and return them together with a kappa such that
+  // scaled_w ~= buffer * 10^kappa. (It will not always be exactly the same
+  // since DigitGenCounted only produces a limited number of digits.)
+  int kappa;
+  bool result = DigitGenCounted(scaled_w, requested_digits,
+                                buffer, length, &kappa);
+  *decimal_exponent = -mk + kappa;
+  return result;
+}
+
+
 bool FastDtoa(double v,
+              FastDtoaMode mode,
+              int requested_digits,
               Vector<char> buffer,
               int* length,
-              int* point) {
+              int* decimal_point) {
   ASSERT(v > 0);
   ASSERT(!Double(v).IsSpecial());
 
-  int decimal_exponent;
-  bool result = grisu3(v, buffer, length, &decimal_exponent);
-  *point = *length + decimal_exponent;
-  buffer[*length] = '\0';
+  bool result = false;
+  int decimal_exponent = 0;
+  switch (mode) {
+    case FAST_DTOA_SHORTEST:
+      result = Grisu3(v, buffer, length, &decimal_exponent);
+      break;
+    case FAST_DTOA_PRECISION:
+      result = Grisu3Counted(v, requested_digits,
+                             buffer, length, &decimal_exponent);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  if (result) {
+    *decimal_point = *length + decimal_exponent;
+    buffer[*length] = '\0';
+  }
   return result;
 }
 
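The subtlest step in RoundWeedCounted above is the round-up: adding one unit in the last place can carry through a run of '9's and even lengthen the number by a power of ten. That step in isolation, stand-alone but with the same logic:

    // Round a decimal digit buffer up by one unit in the last place.
    // "179" becomes "180"; "99" becomes "10" with *kappa incremented,
    // since 99 * 10^k rounds up to 10 * 10^(k+1).
    void RoundBufferUp(char* buffer, int length, int* kappa) {
      buffer[length - 1]++;
      for (int i = length - 1; i > 0; --i) {
        if (buffer[i] != '0' + 10) break;  // no carry out of this digit
        buffer[i] = '0';
        buffer[i - 1]++;
      }
      if (buffer[0] == '0' + 10) {  // carry rippled past the first digit
        buffer[0] = '1';
        (*kappa) += 1;
      }
    }
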
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
index 4403a75..94c22ec 100644
--- a/src/fast-dtoa.h
+++ b/src/fast-dtoa.h
@@ -31,27 +31,52 @@
 namespace v8 {
 namespace internal {
 
+enum FastDtoaMode {
+  // Computes the shortest representation of the given input. The returned
+  // result will be the most accurate number of this length. Longer
+  // representations might be more accurate.
+  FAST_DTOA_SHORTEST,
+  // Computes a representation where the precision (number of digits) is
+  // given as input. The precision is independent of the decimal point.
+  FAST_DTOA_PRECISION
+};
+
 // FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
 // include the terminating '\0' character.
 static const int kFastDtoaMaximalLength = 17;
 
 // Provides a decimal representation of v.
-// v must be a strictly positive finite double.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+//   * v must be a strictly positive finite double.
+//
 // Returns true if it succeeds, otherwise the result can not be trusted.
 // There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true then
-//   v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible: no
-// 0.099999999999 instead of 0.1.
-// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain the
-// one closest to v.
-// The variable 'sign' will be '0' if the given number is positive, and '1'
-//   otherwise.
+// If the function returns true and mode equals
+//   - FAST_DTOA_SHORTEST, then
+//     the parameter requested_digits is ignored.
+//     The result satisfies
+//         v == (double) (buffer * 10^(point - length)).
+//     The digits in the buffer are the shortest representation possible. E.g.
+//     if 0.099999999999 and 0.1 represent the same double then "1" is returned
+//     with point = 0.
+//     The last digit will be closest to the actual v. That is, even if several
+//     digits might correctly yield 'v' when read again, the buffer will contain
+//     the one closest to v.
+//   - FAST_DTOA_PRECISION, then
+//     the buffer contains requested_digits digits.
+//     the difference v - (buffer * 10^(point-length)) is closest to zero for
+//     all possible representations of requested_digits digits.
+//     If there are two values that are equally close, then FastDtoa returns
+//     false.
+// For both modes the buffer must be large enough to hold the result.
 bool FastDtoa(double d,
+              FastDtoaMode mode,
+              int requested_digits,
               Vector<char> buffer,
               int* length,
-              int* point);
+              int* decimal_point);
 
 } }  // namespace v8::internal
 
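A usage sketch of the two modes (this is V8-internal API: Vector and the constant come from the surrounding headers, and the commented values follow from v ~= buffer * 10^(point - length)):

    char buffer[kFastDtoaMaximalLength + 1];
    int length, point;

    // Shortest mode: requested_digits is ignored.
    if (FastDtoa(0.1, FAST_DTOA_SHORTEST, 0,
                 Vector<char>(buffer, kFastDtoaMaximalLength + 1),
                 &length, &point)) {
      // buffer == "1", length == 1, point == 0:  0.1 == 1 * 10^(0 - 1).
    }

    // Precision mode: up to the requested number of significant digits.
    if (FastDtoa(3.14159, FAST_DTOA_PRECISION, 4,
                 Vector<char>(buffer, kFastDtoaMaximalLength + 1),
                 &length, &point)) {
      // buffer == "3142", length == 4, point == 1:  0.3142 * 10^1.
    }
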
diff --git a/src/frames.cc b/src/frames.cc
index 76a441b..3cdb015 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -143,8 +143,8 @@
     state.pc_address =
         reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
     type = StackFrame::ComputeType(&state);
-    if (SingletonFor(type) == NULL) return;
   }
+  if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
 }
 
@@ -203,13 +203,24 @@
 // -------------------------------------------------------------------------
 
 
+bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
+  if (!validator_.IsValid(fp)) return false;
+  Address sp = ExitFrame::ComputeStackPointer(fp);
+  if (!validator_.IsValid(sp)) return false;
+  StackFrame::State state;
+  ExitFrame::FillState(fp, sp, &state);
+  if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
+    return false;
+  }
+  return *state.pc_address != NULL;
+}
+
+
 SafeStackFrameIterator::SafeStackFrameIterator(
     Address fp, Address sp, Address low_bound, Address high_bound) :
-    maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
-    is_valid_top_(
-        IsWithinBounds(low_bound, high_bound,
-                       Top::c_entry_fp(Top::GetCurrentThread())) &&
-        Top::handler(Top::GetCurrentThread()) != NULL),
+    maintainer_(),
+    stack_validator_(low_bound, high_bound),
+    is_valid_top_(IsValidTop(low_bound, high_bound)),
     is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
     is_working_iterator_(is_valid_top_ || is_valid_fp_),
     iteration_done_(!is_working_iterator_),
@@ -217,6 +228,14 @@
 }
 
 
+bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
+  Address fp = Top::c_entry_fp(Top::GetCurrentThread());
+  ExitFrameValidator validator(low_bound, high_bound);
+  if (!validator.IsValidFP(fp)) return false;
+  return Top::handler(Top::GetCurrentThread()) != NULL;
+}
+
+
 void SafeStackFrameIterator::Advance() {
   ASSERT(is_working_iterator_);
   ASSERT(!done());
@@ -258,9 +277,8 @@
     // sure that caller FP address is valid.
     Address caller_fp = Memory::Address_at(
         frame->fp() + EntryFrameConstants::kCallerFPOffset);
-    if (!IsValidStackAddress(caller_fp)) {
-      return false;
-    }
+    ExitFrameValidator validator(stack_validator_);
+    if (!validator.IsValidFP(caller_fp)) return false;
   } else if (frame->is_arguments_adaptor()) {
     // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
     // the number of arguments is stored on stack as Smi. We need to check
@@ -415,6 +433,22 @@
 }
 
 
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+  if (fp == 0) return NONE;
+  Address sp = ComputeStackPointer(fp);
+  FillState(fp, sp, state);
+  ASSERT(*state->pc_address != NULL);
+  return EXIT;
+}
+
+
+void ExitFrame::FillState(Address fp, Address sp, State* state) {
+  state->sp = sp;
+  state->fp = fp;
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+}
+
+
 Address StandardFrame::GetExpressionAddress(int n) const {
   const int offset = StandardFrameConstants::kExpressionsOffset;
   return fp() + offset - n * kPointerSize;
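
The validator chain exists because the profiler's sampler can interrupt the VM at any instruction, so every pointer must be bounds-checked before anything is derived from it or read through it. A self-contained miniature of the checks ExitFrameValidator::IsValidFP performs (the offsets are illustrative; the real ones are platform-specific):

    #include <cstdint>

    typedef uintptr_t Address;
    const int kPointerSize = sizeof(void*);
    const int kSPOffset = 2 * kPointerSize;  // stand-in for the real offset

    static bool IsWithinBounds(Address low, Address high, Address addr) {
      return low <= addr && addr < high;
    }

    // fp is checked first, then the sp derived from it, then the pc slot;
    // only then is the slot actually dereferenced.
    bool IsValidExitFrameFP(Address fp, Address low, Address high) {
      if (!IsWithinBounds(low, high, fp)) return false;
      Address sp = fp + kSPOffset;
      if (!IsWithinBounds(low, high, sp)) return false;
      Address pc_slot = sp - kPointerSize;
      if (!IsWithinBounds(low, high, pc_slot)) return false;
      return *reinterpret_cast<void* const*>(pc_slot) != 0;
    }
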
diff --git a/src/frames.h b/src/frames.h
index 2011190..2d4f338 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -67,7 +67,7 @@
   static PcToCodeCacheEntry* GetCacheEntry(Address pc);
 
  private:
-  static const int kPcToCodeCacheSize = 256;
+  static const int kPcToCodeCacheSize = 1024;
   static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
 };
 
@@ -141,6 +141,13 @@
     NO_ID = 0
   };
 
+  struct State {
+    State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+    Address sp;
+    Address fp;
+    Address* pc_address;
+  };
+
   // Copy constructor; it breaks the connection to host iterator.
   StackFrame(const StackFrame& original) {
     this->state_ = original.state_;
@@ -201,12 +208,6 @@
                      int index) const { }
 
  protected:
-  struct State {
-    Address sp;
-    Address fp;
-    Address* pc_address;
-  };
-
   explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
   virtual ~StackFrame() { }
 
@@ -318,6 +319,8 @@
   // pointer. Used when constructing the first stack frame seen by an
   // iterator and the frames following entry frames.
   static Type GetStateForFramePointer(Address fp, State* state);
+  static Address ComputeStackPointer(Address fp);
+  static void FillState(Address fp, Address sp, State* state);
 
  protected:
   explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@@ -443,6 +446,7 @@
   inline Object* function_slot_object() const;
 
   friend class StackFrameIterator;
+  friend class StackTracer;
 };
 
 
@@ -654,12 +658,36 @@
   }
 
  private:
+  class StackAddressValidator {
+   public:
+    StackAddressValidator(Address low_bound, Address high_bound)
+        : low_bound_(low_bound), high_bound_(high_bound) { }
+    bool IsValid(Address addr) const {
+      return IsWithinBounds(low_bound_, high_bound_, addr);
+    }
+   private:
+    Address low_bound_;
+    Address high_bound_;
+  };
+
+  class ExitFrameValidator {
+   public:
+    explicit ExitFrameValidator(const StackAddressValidator& validator)
+        : validator_(validator) { }
+    ExitFrameValidator(Address low_bound, Address high_bound)
+        : validator_(low_bound, high_bound) { }
+    bool IsValidFP(Address fp);
+   private:
+    StackAddressValidator validator_;
+  };
+
   bool IsValidStackAddress(Address addr) const {
-    return IsWithinBounds(low_bound_, high_bound_, addr);
+    return stack_validator_.IsValid(addr);
   }
   bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
   bool IsValidFrame(StackFrame* frame) const;
   bool IsValidCaller(StackFrame* frame);
+  static bool IsValidTop(Address low_bound, Address high_bound);
 
   // This is a nasty hack to make sure the active count is incremented
   // before the constructor for the embedded iterator is invoked. This
@@ -674,8 +702,7 @@
 
   ActiveCountMaintainer maintainer_;
   static int active_count_;
-  Address low_bound_;
-  Address high_bound_;
+  StackAddressValidator stack_validator_;
   const bool is_valid_top_;
   const bool is_valid_fp_;
   const bool is_working_iterator_;
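
Two quieter changes in this header: State gains a zero-initializing constructor and moves to the public section, so a partially filled state reads as NULL instead of garbage during validation, and the direct-mapped PC-to-code cache grows from 256 to 1024 entries. The cache shape, sketched with illustrative hashing (not V8's):

    #include <cstdint>

    // A pc hashes to exactly one slot; a miss simply overwrites it.
    // Quadrupling the slot count costs a few KB but cuts conflict misses
    // while profiling, which looks up return addresses constantly.
    struct Entry {
      const void* pc;
      const void* code;
    };

    const int kCacheSize = 1024;  // must remain a power of two
    static Entry cache[kCacheSize];

    Entry* GetCacheEntry(const void* pc) {
      uintptr_t hash = reinterpret_cast<uintptr_t>(pc) >> 2;  // drop align bits
      return &cache[hash & (kCacheSize - 1)];
    }
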
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 9db233c..2d60d5b 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -509,6 +509,9 @@
   static Register result_register();
   static Register context_register();
 
+  // Helper for calling an IC stub.
+  void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
diff --git a/src/heap.cc b/src/heap.cc
index 650800f..905d065 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2650,6 +2650,20 @@
 }
 
 
+static bool HasDuplicates(DescriptorArray* descriptors) {
+  int count = descriptors->number_of_descriptors();
+  if (count > 1) {
+    String* prev_key = descriptors->GetKey(0);
+    for (int i = 1; i != count; i++) {
+      String* current_key = descriptors->GetKey(i);
+      if (prev_key == current_key) return true;
+      prev_key = current_key;
+    }
+  }
+  return false;
+}
+
+
 Object* Heap::AllocateInitialMap(JSFunction* fun) {
   ASSERT(!fun->has_initial_map());
 
@@ -2683,23 +2697,34 @@
   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
-      count = in_object_properties;
+      // Inline constructor can only handle inobject properties.
+      fun->shared()->ForbidInlineConstructor();
+    } else {
+      Object* descriptors_obj = DescriptorArray::Allocate(count);
+      if (descriptors_obj->IsFailure()) return descriptors_obj;
+      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
+      for (int i = 0; i < count; i++) {
+        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
+        ASSERT(name->IsSymbol());
+        FieldDescriptor field(name, i, NONE);
+        field.SetEnumerationIndex(i);
+        descriptors->Set(i, &field);
+      }
+      descriptors->SetNextEnumerationIndex(count);
+      descriptors->SortUnchecked();
+
+      // The descriptors may contain duplicates because the compiler does
+      // not guarantee the uniqueness of property names (checking would
+      // have required quadratic time). Once the descriptors are sorted we
+      // can check for duplicates in linear time.
+      if (HasDuplicates(descriptors)) {
+        fun->shared()->ForbidInlineConstructor();
+      } else {
+        map->set_instance_descriptors(descriptors);
+        map->set_pre_allocated_property_fields(count);
+        map->set_unused_property_fields(in_object_properties - count);
+      }
     }
-    Object* descriptors_obj = DescriptorArray::Allocate(count);
-    if (descriptors_obj->IsFailure()) return descriptors_obj;
-    DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
-    for (int i = 0; i < count; i++) {
-      String* name = fun->shared()->GetThisPropertyAssignmentName(i);
-      ASSERT(name->IsSymbol());
-      FieldDescriptor field(name, i, NONE);
-      field.SetEnumerationIndex(i);
-      descriptors->Set(i, &field);
-    }
-    descriptors->SetNextEnumerationIndex(count);
-    descriptors->Sort();
-    map->set_instance_descriptors(descriptors);
-    map->set_pre_allocated_property_fields(count);
-    map->set_unused_property_fields(in_object_properties - count);
   }
   return map;
 }
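Sorting first is what makes the duplicate check above linear: the property names are symbols, so duplicates are identical pointers, hash equally, and end up adjacent after SortUnchecked. A standalone sketch of the same idea (hypothetical helper, not part of the patch):

    // Equal symbols are the same object, so pointer comparison suffices.
    static bool HasAdjacentDuplicates(String** sorted_keys, int count) {
      for (int i = 1; i < count; i++) {
        if (sorted_keys[i - 1] == sorted_keys[i]) return true;
      }
      return false;
    }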
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index eef307d..e0cb8a1 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2179,6 +2179,16 @@
 }
 
 
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2201,7 +2211,29 @@
 }
 
 
-void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0xC2);
+  emit_sse_operand(dst, src);
+  EMIT(1);  // LT == 1
+}
+
+
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x28);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2358,6 +2390,19 @@
   emit_sse_operand(dst, src);
 }
 
+
+void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x73);
+  emit_sse_operand(esi, reg);  // esi == 6
+  EMIT(imm8);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
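Note the emit_sse_operand(esi, reg) trick in psllq: the /6 opcode-extension field of the 66 0F 73 encoding is expressed by passing a register whose code is 6, which happens to be esi. For example, the call LoadPowerOf2 makes below should emit 66 0F 73 F0 34 (ModR/M 0xF0 packs mod=11, /6, and rm=0 for xmm0; 0x34 is the immediate 52):

    __ psllq(xmm0, HeapNumber::kMantissaBits);  // kMantissaBits == 52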
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 928f172..539addd 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -788,9 +788,15 @@
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
+  void andpd(XMMRegister dst, XMMRegister src);
+
   void ucomisd(XMMRegister dst, XMMRegister src);
   void movmskpd(Register dst, XMMRegister src);
 
+  void cmpltsd(XMMRegister dst, XMMRegister src);
+
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqu(XMMRegister dst, const Operand& src);
@@ -806,6 +812,8 @@
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
+  void psllq(XMMRegister reg, int8_t imm8);
+
   // Parallel XMM operations.
   void movntdqa(XMMRegister src, const Operand& dst);
   void movntdq(const Operand& dst, XMMRegister src);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 86f3877..bde2f18 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -9144,9 +9144,15 @@
  public:
   DeferredReferenceGetNamedValue(Register dst,
                                  Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver),  name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                 Handle<String> name,
+                                 bool is_contextual)
+      : dst_(dst),
+        receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }
 
   virtual void Generate();
@@ -9158,6 +9164,7 @@
   Register dst_;
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
 };
 
 
@@ -9167,9 +9174,15 @@
   }
   __ Set(ecx, Immediate(name_));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test eax instruction to indicate
-  // that the inobject property case was inlined.
+  RelocInfo::Mode mode = is_contextual_
+      ? RelocInfo::CODE_TARGET_CONTEXT
+      : RelocInfo::CODE_TARGET;
+  __ call(ic, mode);
+  // The call must be followed by one of:
+  // - a test eax instruction, indicating that the inobject property
+  //   case was inlined;
+  // - a mov ecx instruction, indicating that the contextual property
+  //   load was inlined.
   //
   // Store the delta to the map check instruction here in the test
   // instruction.  Use masm_-> instead of the __ macro since the
@@ -9177,8 +9190,13 @@
   int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  if (is_contextual_) {
+    masm_->mov(ecx, -delta_to_patch_site);
+    __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  } else {
+    masm_->test(eax, Immediate(-delta_to_patch_site));
+    __ IncrementCounter(&Counters::named_load_inline_miss, 1);
+  }
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
@@ -9349,12 +9367,17 @@
 #ifdef DEBUG
   int original_height = frame()->height();
 #endif
+
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
   Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object.  Also do not inline for unoptimized code.  This saves time in
-  // the code generator.  Unoptimized code is toplevel code or code that is
-  // not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  // Do not inline in global code or when not in a loop.
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     frame()->Push(name);
 
@@ -9367,19 +9390,26 @@
     // instruction here.
     __ nop();
   } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    // Inline the property load.
+    Comment cmnt(masm(), is_contextual
+                         ? "[ Inlined contextual property load"
+                         : "[ Inlined named property load");
     Result receiver = frame()->Pop();
     receiver.ToRegister();
 
     result = allocator()->Allocate();
     ASSERT(result.is_valid());
     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+        new DeferredReferenceGetNamedValue(result.reg(),
+                                           receiver.reg(),
+                                           name,
+                                           is_contextual);
 
-    // Check that the receiver is a heap object.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
+    if (!is_contextual) {
+      // Check that the receiver is a heap object.
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(zero);
+    }
 
     __ bind(deferred->patch_site());
     // This is the map check instruction that will be patched (so we can't
@@ -9391,17 +9421,33 @@
     // which allows the assert below to succeed and patching to work.
     deferred->Branch(not_equal);
 
-    // The delta from the patch label to the load offset must be statically
-    // known.
+    // The delta from the patch label to the actual load must be
+    // statically known.
     ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
            LoadIC::kOffsetToLoadInstruction);
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
 
-    __ IncrementCounter(&Counters::named_load_inline, 1);
+    if (is_contextual) {
+      // Load the (initially invalid) cell and get its value.
+      masm()->mov(result.reg(), Factory::null_value());
+      if (FLAG_debug_code) {
+        __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
+               Factory::global_property_cell_map());
+        __ Assert(equal, "Uninitialized inlined contextual load");
+      }
+      __ mov(result.reg(),
+             FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
+      __ cmp(result.reg(), Factory::the_hole_value());
+      deferred->Branch(equal);
+      __ IncrementCounter(&Counters::named_load_global_inline, 1);
+    } else {
+      // The initial (invalid) offset has to be large enough to force a 32-bit
+      // instruction encoding to allow patching with an arbitrary offset.  Use
+      // kMaxInt (minus kHeapObjectTag).
+      int offset = kMaxInt;
+      masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+      __ IncrementCounter(&Counters::named_load_inline, 1);
+    }
+
     deferred->BindExit();
   }
   ASSERT(frame()->height() == original_height - 1);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 64305ef..207648b 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -685,7 +685,8 @@
 
     case 0xDD: switch (regop) {
         case 0: mnem = "fld_d"; break;
-        case 2: mnem = "fstp"; break;
+        case 1: mnem = "fisttp_d"; break;
+        case 2: mnem = "fst_d"; break;
         case 3: mnem = "fstp_d"; break;
         default: UnimplementedInstruction();
       }
@@ -957,6 +958,14 @@
           } else if (f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
+          } else if (f0byte == 0x28) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movaps %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if ((f0byte & 0xF0) == 0x80) {
             data += JumpConditional(data, branch_hint);
           } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1156,6 +1165,23 @@
                             NameOfXMMRegister(regop),
                             NameOfXMMRegister(rm));
              data++;
+          } else if (*data == 0x73) {
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             int8_t imm8 = static_cast<int8_t>(data[1]);
+             AppendToBuffer("psllq %s,%d",
+                            NameOfXMMRegister(rm),
+                            static_cast<int>(imm8));
+             data += 2;
+          } else if (*data == 0x54) {
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             AppendToBuffer("andpd %s,%s",
+                            NameOfXMMRegister(regop),
+                            NameOfXMMRegister(rm));
+             data++;
           } else {
             UnimplementedInstruction();
           }
@@ -1274,6 +1300,23 @@
                                NameOfXMMRegister(rm));
                 data++;
               }
+            } else if (b2 == 0xC2) {
+              // Intel manual 2A, Table 3-18.
+              const char* const pseudo_op[] = {
+                "cmpeqsd",
+                "cmpltsd",
+                "cmplesd",
+                "cmpunordsd",
+                "cmpneqsd",
+                "cmpnltsd",
+                "cmpnlesd",
+                "cmpordsd"
+              };
+              AppendToBuffer("%s %s,%s",
+                             pseudo_op[data[1]],
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data += 2;
             } else {
               if (mod != 0x3) {
                 AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
@@ -1367,7 +1410,7 @@
                                      " %s",
                                      tmp_buffer_.start());
   return instr_len;
-}
+}  // NOLINT (function is too long)
 
 
 //------------------------------------------------------------------------------
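Illustrative output for the newly decoded instructions (hypothetical byte sequences):

    66 0F 73 F0 34      psllq xmm0,52
    F2 0F C2 D0 01      cmpltsd xmm2,xmm0
    0F 28 D0            movaps xmm2,xmm0
    66 0F 54 CA         andpd xmm1,xmm2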
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 9baf763..dd44f0e 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -35,16 +35,8 @@
 namespace internal {
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute the stack pointer.
-  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-  // Fill in the state.
-  state->fp = fp;
-  state->sp = sp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 1e65c4b..8144f41 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -631,10 +631,7 @@
       __ pop(edx);
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // Absence of a test eax instruction following the call
-      // indicates that none of the load was inlined.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
     }
   }
 }
@@ -991,8 +988,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
-  __ nop();  // Signal no inlined code.
+  EmitCallIC(ic, mode);
 }
 
 
@@ -1069,7 +1065,7 @@
                                                    slow));
           __ mov(eax, Immediate(key_literal->handle()));
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1093,12 +1089,7 @@
     __ mov(eax, CodeGenerator::GlobalObject());
     __ mov(ecx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    // By emitting a nop we make sure that we do not have a test eax
-    // instruction after the call it is treated specially by the LoadIC code
-    // Remember that the assembler may choose to do peephole optimization
-    // (eg, push/pop elimination).
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
     Apply(context, eax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1161,10 +1152,8 @@
 
     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    // Notice: We must not have a "test eax, ..." instruction after the
-    // call. It is treated specially by the LoadIC code.
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+
     // Drop key and object left on the stack by IC.
     Apply(context, eax);
   }
@@ -1262,8 +1251,7 @@
           __ mov(ecx, Immediate(key->handle()));
           __ mov(edx, Operand(esp, 0));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
-          __ nop();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
@@ -1476,16 +1464,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1844,8 +1830,7 @@
       __ pop(eax);  // Restore value.
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1856,8 +1841,7 @@
       __ pop(edx);
       __ pop(eax);  // Restore value.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1880,8 +1864,7 @@
     __ mov(ecx, var->name());
     __ mov(edx, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
@@ -1965,8 +1948,7 @@
     __ pop(edx);
   }
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2004,10 +1986,7 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // This nop signals to the IC that there is no inlined code at the call
-  // site for it to patch.
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2054,7 +2033,7 @@
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   Apply(context_, eax);
@@ -2077,7 +2056,7 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(
       arg_count, in_loop);
-  __ call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   Apply(context_, eax);
@@ -2201,7 +2180,7 @@
     } else {
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed CallIC.
+      // for a regular property use a keyed call IC.
       VisitForValue(prop->obj(), kStack);
       if (prop->is_synthetic()) {
         VisitForValue(prop->key(), kAccumulator);
@@ -2210,11 +2189,7 @@
         __ pop(edx);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ call(ic, RelocInfo::CODE_TARGET);
-        // By emitting a nop we make sure that we do not have a "test eax,..."
-        // instruction after the call as it is treated specially
-        // by the LoadIC code.
-        __ nop();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         // Push result (function).
         __ push(eax);
         // Push Global receiver.
@@ -3142,7 +3117,7 @@
     __ Set(ecx, Immediate(expr->name()));
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-    __ call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3447,10 +3422,7 @@
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
@@ -3464,10 +3436,7 @@
       __ pop(ecx);
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         // Result is on the stack
         if (context_ != Expression::kEffect) {
@@ -3491,8 +3460,7 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ call(ic, RelocInfo::CODE_TARGET);
-    __ nop();  // Signal no inlined code.
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     if (where == kStack) __ push(eax);
   } else if (proxy != NULL &&
              proxy->var()->slot() != NULL &&
@@ -3744,10 +3712,36 @@
 }
 
 
-Register FullCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::result_register() {
+  return eax;
+}
 
 
-Register FullCodeGenerator::context_register() { return esi; }
+Register FullCodeGenerator::context_register() {
+  return esi;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ call(ic, mode);
+
+  // If we're calling a (keyed) load or store stub, we have to mark
+  // the call as containing no inlined code so we will not attempt to
+  // patch it.
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+    case Code::KEYED_LOAD_IC:
+    case Code::STORE_IC:
+    case Code::KEYED_STORE_IC:
+      __ nop();  // Signals no inlined code.
+      break;
+    default:
+      // Do nothing.
+      break;
+  }
+}
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
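The nop matters because the IC patching code keys off the byte immediately after the call: a test eax opcode marks an inlined in-object load and a mov ecx opcode marks an inlined contextual load, while a nop matches neither, so the call site is left alone. A sketch of the guard on the patching side (cf. PatchInlinedContextualLoad in src/ia32/ic-ia32.cc below):

    Address next = address + Assembler::kCallTargetAddressOffset;
    if (*next != kMovEcxByte) return false;  // a nop here means nothing to patch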
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3d0bd79..413c36e 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -692,7 +692,6 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = edx;
   Register index = eax;
@@ -707,7 +706,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -715,10 +714,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ Set(eax, Immediate(Factory::undefined_value()));
-  __ ret(0);
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -1666,6 +1661,38 @@
 }
 
 
+// One-byte opcode for mov ecx,0xXXXXXXXX.
+static const byte kMovEcxByte = 0xB9;
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // The address of the instruction following the call.
+  Address mov_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+  // If the instruction following the call is not a cmp eax, nothing
+  // was inlined.
+  if (*mov_instruction_address != kMovEcxByte) return false;
+
+  Address delta_address = mov_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 4 bytes of the 7-byte
+  // operand-immediate compare instruction, so we add 3 to get the
+  // offset to the last 4 bytes.
+  Address map_address = mov_instruction_address + delta + 3;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The cell is in the last 4 bytes of a five-byte mov reg,imm32
+  // instruction, so we add 1 to get the offset to the last 4 bytes.
+  Address offset_address =
+      mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
+  *reinterpret_cast<Object**>(offset_address) = cell;
+  return true;
+}
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
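For orientation, the code PatchInlinedContextualLoad rewrites is the sequence emitted by the deferred contextual load in src/ia32/codegen-ia32.cc above; roughly (illustrative layout, offsets per the comments in the function):

    call <LoadIC stub>        ; IC call site
    mov ecx, <-delta>         ; marker; delta leads back to the map check
    ...
    cmp <reg>, imm32          ; 7-byte compare; the map goes in its last 4 bytes
    ...
    mov <reg>, imm32          ; 5-byte mov; the cell goes in its last 4 bytes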
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 87e25d7..f8dabd5 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1553,6 +1553,17 @@
 }
 
 
+void MacroAssembler::LoadPowerOf2(XMMRegister dst,
+                                  Register scratch,
+                                  int power) {
+  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+                  HeapNumber::kExponentBits));
+  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
+  movd(dst, Operand(scratch));
+  psllq(dst, HeapNumber::kMantissaBits);
+}
+
+
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
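LoadPowerOf2 builds 2^power directly from IEEE-754 bits: a double with a zero mantissa and biased exponent e encodes 2^(e-1023), and psllq by kMantissaBits (52) shifts the biased exponent into place. The equivalent computation on the C++ side, as a sketch (hypothetical standalone analogue):

    #include <cstdint>
    #include <cstring>

    double PowerOf2(int power) {
      // 1023 == HeapNumber::kExponentBias; 52 == HeapNumber::kMantissaBits.
      uint64_t bits = static_cast<uint64_t>(power + 1023) << 52;
      double value;
      std::memcpy(&value, &bits, sizeof value);
      return value;  // 2.0 to the given power, for -1022 <= power <= 1023
    }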
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a7534cb..aa7caf5 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -258,6 +258,8 @@
                       TypeInfo info,
                       Label* on_not_int32);
 
+  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
+
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object);
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 828e71a..672d8c7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -265,7 +265,11 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+         Top::global());
+  __ j(not_equal, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -1626,7 +1630,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            eax);
+                                            eax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
@@ -1695,7 +1700,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            eax);
+                                            eax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                   ebx, edx, edi, name, &miss);
@@ -1813,6 +1819,131 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
+  CpuFeatures::Scope use_sse2(SSE2);
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+
+    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into eax.
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Check if the argument is a smi.
+  Label smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &smi);
+
+  // Check if the argument is a heap number and load its value into xmm0.
+  Label slow;
+  __ CheckMap(eax, Factory::heap_number_map(), &slow, true);
+  __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+
+  // Check if the argument is strictly positive. Note this also
+  // discards NaN.
+  __ xorpd(xmm1, xmm1);
+  __ ucomisd(xmm0, xmm1);
+  __ j(below_equal, &slow);
+
+  // Do a truncating conversion.
+  __ cvttsd2si(eax, Operand(xmm0));
+
+  // Check if the result fits into a smi. Note this also checks for
+  // 0x80000000 which signals a failed conversion.
+  Label wont_fit_into_smi;
+  __ test(eax, Immediate(0xc0000000));
+  __ j(not_zero, &wont_fit_into_smi);
+
+  // Smi tag and return.
+  __ SmiTag(eax);
+  __ bind(&smi);
+  __ ret(2 * kPointerSize);
+
+  // Check if the argument is < 2^kMantissaBits.
+  Label already_round;
+  __ bind(&wont_fit_into_smi);
+  __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
+  __ ucomisd(xmm0, xmm1);
+  __ j(above_equal, &already_round);
+
+  // Save a copy of the argument.
+  __ movaps(xmm2, xmm0);
+
+  // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+  __ addsd(xmm0, xmm1);
+  __ subsd(xmm0, xmm1);
+
+  // Compare the argument and the tentative result to get the right mask:
+  //   if xmm2 < xmm0:
+  //     xmm2 = 1...1
+  //   else:
+  //     xmm2 = 0...0
+  __ cmpltsd(xmm2, xmm0);
+
+  // Subtract 1 if the argument was less than the tentative result.
+  __ LoadPowerOf2(xmm1, ebx, 0);
+  __ andpd(xmm1, xmm2);
+  __ subsd(xmm0, xmm1);
+
+  // Return a new heap number.
+  __ AllocateHeapNumber(eax, ebx, edx, &slow);
+  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  __ ret(2 * kPointerSize);
+
+  // Return the argument (when it's an already round heap number).
+  __ bind(&already_round);
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ ret(2 * kPointerSize);
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // ecx: function name.
+  Object* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
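The non-smi path in CompileMathFloorCall uses the classic add-and-subtract rounding trick: for 0 < x < 2^52, computing (x + 2^52) - 2^52 in double precision rounds x to the nearest integer, and the cmpltsd/andpd/subsd tail subtracts 1 exactly when that rounding overshot. A C sketch of the same computation, under the stub's preconditions:

    double FloorViaRounding(double x) {   // assumes 0 < x < 2^52
      const double k2_52 = 4503599627370496.0;  // 2^kMantissaBits
      double r = (x + k2_52) - k2_52;     // round to the nearest integer
      if (x < r) r -= 1.0;                // correct the rounded-up cases
      return r;
    }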
@@ -1894,7 +2025,7 @@
         __ j(above_equal, &miss, not_taken);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, eax);
+            masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -1914,7 +2045,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, eax);
+            masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -1935,7 +2066,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                         ebx, edx, edi, name, &miss);
       }
@@ -2474,12 +2605,12 @@
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ mov(eax, ebx);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
diff --git a/src/ic.cc b/src/ic.cc
index b4a333e..5b62a8a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -299,6 +299,7 @@
   // present) to guarantee failure by holding an invalid map (the null
   // value).  The offset can be patched to anything.
   PatchInlinedLoad(address, Heap::null_value(), 0);
+  PatchInlinedContextualLoad(address, Heap::null_value(), Heap::null_value());
 }
 
 
@@ -720,6 +721,14 @@
 }
 
 
+#ifdef DEBUG
+#define TRACE_IC_NAMED(msg, name) \
+  if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
+#else
+#define TRACE_IC_NAMED(msg, name)
+#endif
+
+
 Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
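Since TRACE_IC_NAMED expands to a braceless if, it is only safe as a full statement, which is how every call site below uses it. A more defensive shape, were it ever needed (an assumption, not part of this patch), would wrap the body:

    #define TRACE_IC_NAMED(msg, name) \
      do { if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString()); } while (false)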
@@ -797,15 +806,24 @@
     LOG(SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined =
+  bool can_be_inlined_precheck =
       FLAG_use_ic &&
-      state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
-      lookup.type() == FIELD &&
       !object->IsAccessCheckNeeded();
 
+  bool can_be_inlined =
+      can_be_inlined_precheck &&
+      state == PREMONOMORPHIC &&
+      lookup.type() == FIELD;
+
+  bool can_be_inlined_contextual =
+      can_be_inlined_precheck &&
+      state == UNINITIALIZED &&
+      lookup.holder()->IsGlobalObject() &&
+      lookup.type() == NORMAL;
+
   if (can_be_inlined) {
     Map* map = lookup.holder()->map();
     // Property's index in the properties array.  If negative we have
@@ -816,32 +834,29 @@
       int offset = map->instance_size() + (index * kPointerSize);
       if (PatchInlinedLoad(address(), map, offset)) {
         set_target(megamorphic_stub());
-#ifdef DEBUG
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
-        }
-#endif
+        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
         return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-#ifdef DEBUG
       } else {
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
-                 *name->ToCString());
-        }
+        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
+                       name);
       }
     } else {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
-               *name->ToCString());
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
+    }
+  } else if (can_be_inlined_contextual) {
+    Map* map = lookup.holder()->map();
+    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+        lookup.holder()->property_dictionary()->ValueAt(
+            lookup.GetDictionaryEntry()));
+    if (PatchInlinedContextualLoad(address(), map, cell)) {
+      set_target(megamorphic_stub());
+      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
+      ASSERT(cell->value() != Heap::the_hole_value());
+      return cell->value();
     }
   } else {
     if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
-               *name->ToCString());
-#endif
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
     }
   }
 
diff --git a/src/ic.h b/src/ic.h
index 17450cc..a5fada0 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -298,6 +298,10 @@
 
   static bool PatchInlinedLoad(Address address, Object* map, int index);
 
+  static bool PatchInlinedContextualLoad(Address address,
+                                         Object* map,
+                                         Object* cell);
+
   friend class IC;
 };
 
diff --git a/src/log.cc b/src/log.cc
index 0bca5eb..a9d89a2 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -171,7 +171,9 @@
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
+    sample->stack[i++] =
+        reinterpret_cast<Address>(it.frame()->function_slot_object()) -
+            kHeapObjectTag;
     it.Advance();
   }
   sample->frames_count = i;
diff --git a/src/messages.js b/src/messages.js
index f26c3b5..4f492bc 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -684,6 +684,11 @@
   return FormatEvalOrigin(script);
 };
 
+CallSite.prototype.getScriptNameOrSourceURL = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script ? script.nameOrSourceURL() : null;
+};
+
 CallSite.prototype.getFunction = function () {
   return this.fun;
 };
@@ -775,7 +780,11 @@
 };
 
 function FormatEvalOrigin(script) {
-  var eval_origin = "";
+  var sourceURL = script.nameOrSourceURL();
+  if (sourceURL)
+    return sourceURL;
+
+  var eval_origin = "eval at ";
   if (script.eval_from_function_name) {
     eval_origin += script.eval_from_function_name;
   } else {
@@ -786,9 +795,9 @@
   if (eval_from_script) {
     if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
       // eval script originated from another eval.
-      eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+      eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
     } else {
-      // eval script originated from "real" scource.
+      // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
         var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
@@ -807,25 +816,30 @@
 };
 
 function FormatSourcePosition(frame) {
+  var fileName;
   var fileLocation = "";
   if (frame.isNative()) {
     fileLocation = "native";
   } else if (frame.isEval()) {
-    fileLocation = "eval at " + frame.getEvalOrigin();
+    fileName = frame.getScriptNameOrSourceURL();
+    if (!fileName)
+      fileLocation = frame.getEvalOrigin();
   } else {
-    var fileName = frame.getFileName();
-    if (fileName) {
-      fileLocation += fileName;
-      var lineNumber = frame.getLineNumber();
-      if (lineNumber != null) {
-        fileLocation += ":" + lineNumber;
-        var columnNumber = frame.getColumnNumber();
-        if (columnNumber) {
-          fileLocation += ":" + columnNumber;
-        }
+    fileName = frame.getFileName();
+  }
+
+  if (fileName) {
+    fileLocation += fileName;
+    var lineNumber = frame.getLineNumber();
+    if (lineNumber != null) {
+      fileLocation += ":" + lineNumber;
+      var columnNumber = frame.getColumnNumber();
+      if (columnNumber) {
+        fileLocation += ":" + columnNumber;
       }
     }
   }
+
   if (!fileLocation) {
     fileLocation = "unknown source";
   }
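With these changes, an eval frame whose script carries a //@ sourceURL annotation formats like an ordinary file frame, while untagged eval code falls back to the eval origin. Illustrative results (hypothetical names and positions):

    my-script.js:10:3              // eval tagged with //@ sourceURL=my-script.js
    eval at baz (outer.js:4:1)     // untagged eval; no line/column appended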
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index 0fce3cd..d630562 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -52,9 +52,7 @@
 }
 
 
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
+Address ExitFrame::ComputeStackPointer(Address fp) {
   Address sp = fp + ExitFrameConstants::kSPDisplacement;
   const int offset = ExitFrameConstants::kCodeOffset;
   Object* code = Memory::Object_at(fp + offset);
@@ -62,11 +60,7 @@
   if (is_debug_exit) {
     sp -= kNumJSCallerSaved * kPointerSize;
   }
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  return EXIT;
+  return sp;
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index ef51851..f77800a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3825,7 +3825,7 @@
 }
 
 
-void DescriptorArray::Sort() {
+void DescriptorArray::SortUnchecked() {
   // In-place heap sort.
   int len = number_of_descriptors();
 
@@ -3875,7 +3875,11 @@
       parent_index = child_index;
     }
   }
+}
 
+
+void DescriptorArray::Sort() {
+  SortUnchecked();
   SLOW_ASSERT(IsSortedNoDuplicates());
 }
 
@@ -5269,6 +5273,13 @@
 }
 
 
+void SharedFunctionInfo::ForbidInlineConstructor() {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     false));
+}
+
+
 void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
     bool only_simple_this_property_assignments,
     FixedArray* assignments) {
diff --git a/src/objects.h b/src/objects.h
index 7f6538c..1036a5a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1892,6 +1892,11 @@
   MUST_USE_RESULT Object* RemoveTransitions();
 
   // Sort the instance descriptors by the hash codes of their keys.
+  // Does not check for duplicates.
+  void SortUnchecked();
+
+  // Sort the instance descriptors by the hash codes of their keys.
+  // Checks the result for duplicates.
   void Sort();
 
   // Search the instance descriptors for given name.
@@ -3542,6 +3547,10 @@
   // prototype.
   bool CanGenerateInlineConstructor(Object* prototype);
 
+  // Prevents further attempts to generate inline constructors.
+  // To be called if generation failed for any reason.
+  void ForbidInlineConstructor();
+
   // For functions which only contains this property assignments this provides
   // access to the names for the properties assigned.
   DECL_ACCESSORS(this_property_assignments, Object)
diff --git a/src/parser.cc b/src/parser.cc
index 856c474..aebcc9a 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1001,7 +1001,7 @@
       Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
       entry->key = &symbol[0];
     }
-    symbol_store_.Add(id - 1);
+    WriteNumber(id - 1);
   }
 
   virtual Vector<unsigned> ExtractData() {
@@ -1457,7 +1457,7 @@
                ParserLog* log,
                ScriptDataImpl* pre_data)
     : script_(script),
-      scanner_(is_pre_parsing),
+      scanner_(),
       top_scope_(NULL),
       with_nesting_level_(0),
       temp_scope_(NULL),
@@ -1503,6 +1503,7 @@
   source->TryFlatten();
   scanner_.Initialize(source, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
+  if (pre_data_ != NULL) pre_data_->Initialize();
 
   // Compute the parsing mode.
   mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
@@ -5492,7 +5493,9 @@
 
 
 void ScriptDataImpl::Initialize() {
+  // Prepares state for use.
   if (store_.length() >= kHeaderSize) {
+    function_index_ = kHeaderSize;
     int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
     if (store_.length() > symbol_data_offset) {
       symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
diff --git a/src/parser.h b/src/parser.h
index 8bab92d..8c00857 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -101,10 +101,7 @@
  public:
   explicit ScriptDataImpl(Vector<unsigned> store)
       : store_(store),
-        function_index_(kHeaderSize),
-        owns_store_(true) {
-    Initialize();
-  }
+        owns_store_(true) { }
 
   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.
@@ -190,10 +187,8 @@
   ScriptDataImpl(const char* backing_store, int length)
       : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
                length / sizeof(unsigned)),
-        function_index_(kHeaderSize),
         owns_store_(false) {
     ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
-    Initialize();
   }
 
   // Read strings written by ParserRecorder::WriteString.
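Construction no longer prepares the pre-parse data, so consumers must call Initialize() explicitly before reading it; the parser now does this right after initializing the scanner (see the parser.cc hunk above). A sketch of the new contract:

    void PrepareForReuse(ScriptDataImpl* pre_data) {  // hypothetical helper
      if (pre_data != NULL) pre_data->Initialize();   // rewinds function_index_
    }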
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index cef825d..cdfa9e2 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -46,8 +46,7 @@
 
 
 CodeEntry::CodeEntry(int security_token_id)
-    : call_uid_(0),
-      tag_(Logger::FUNCTION_TAG),
+    : tag_(Logger::FUNCTION_TAG),
       name_prefix_(kEmptyNamePrefix),
       name_(""),
       resource_name_(""),
@@ -62,8 +61,7 @@
                      const char* resource_name,
                      int line_number,
                      int security_token_id)
-    : call_uid_(next_call_uid_++),
-      tag_(tag),
+    : tag_(tag),
       name_prefix_(name_prefix),
       name_(name),
       resource_name_(resource_name),
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index f8fa23d..a7cb7a2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -121,11 +121,9 @@
 
 
 const char* CodeEntry::kEmptyNamePrefix = "";
-unsigned CodeEntry::next_call_uid_ = 1;
 
 
 void CodeEntry::CopyData(const CodeEntry& source) {
-  call_uid_ = source.call_uid_;
   tag_ = source.tag_;
   name_prefix_ = source.name_prefix_;
   name_ = source.name_;
@@ -134,6 +132,26 @@
 }
 
 
+uint32_t CodeEntry::GetCallUid() const {
+  uint32_t hash = ComputeIntegerHash(tag_);
+  hash ^= static_cast<int32_t>(reinterpret_cast<intptr_t>(name_prefix_));
+  hash ^= static_cast<int32_t>(reinterpret_cast<intptr_t>(name_));
+  hash ^= static_cast<int32_t>(reinterpret_cast<intptr_t>(resource_name_));
+  hash ^= static_cast<int32_t>(line_number_);
+  return hash;
+}
+
+
+bool CodeEntry::IsSameAs(CodeEntry* entry) const {
+  return this == entry
+      || (tag_ == entry->tag_
+          && name_prefix_ == entry->name_prefix_
+          && name_ == entry->name_
+          && resource_name_ == entry->resource_name_
+          && line_number_ == entry->line_number_);
+}
+
+
 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
   HashMap::Entry* map_entry =
       children_.Lookup(entry, CodeEntryHash(entry), false);
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c2bc4ce..a2ac820 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -100,17 +100,17 @@
   INLINE(const char* name() const) { return name_; }
   INLINE(const char* resource_name() const) { return resource_name_; }
   INLINE(int line_number() const) { return line_number_; }
-  INLINE(unsigned call_uid() const) { return call_uid_; }
   INLINE(int security_token_id() const) { return security_token_id_; }
 
   INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
 
   void CopyData(const CodeEntry& source);
+  uint32_t GetCallUid() const;
+  bool IsSameAs(CodeEntry* entry) const;
 
   static const char* kEmptyNamePrefix;
 
  private:
-  unsigned call_uid_;
   Logger::LogEventsAndTags tag_;
   const char* name_prefix_;
   const char* name_;
@@ -118,8 +118,6 @@
   int line_number_;
   int security_token_id_;
 
-  static unsigned next_call_uid_;
-
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };
 
@@ -147,11 +145,12 @@
 
  private:
   INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
-    return entry1 == entry2;
+    return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
+        reinterpret_cast<CodeEntry*>(entry2));
   }
 
   INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
+    return entry->GetCallUid();
   }
 
   ProfileTree* tree_;
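The HashMap now deduplicates CodeEntry objects by value rather than by address, so the usual hash/equality contract must hold: entries that are IsSameAs must return the same GetCallUid. That holds by construction, since both are derived from the same five fields (tag, name prefix, name, resource name, line number). A hypothetical invariant check:

    static bool UidContractHolds(CodeEntry* a, CodeEntry* b) {
      return !a->IsSameAs(b) || a->GetCallUid() == b->GetCallUid();
    }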
diff --git a/src/runtime.cc b/src/runtime.cc
index 627ea12..9e16bc4 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -946,7 +946,7 @@
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode =
       static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
-  ASSERT(mode == READ_ONLY || mode == NONE);
+  RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3]);
 
   // Declarations are always done in the function context.
@@ -8944,24 +8944,39 @@
 }
 
 
-// Change the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-// args[1]: boolean indicating on/off
+// Change the state of break on exceptions.
+// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
+// args[1]: Boolean indicating on/off.
 static Object* Runtime_ChangeBreakOnException(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);
-  ASSERT(args[0]->IsNumber());
-  ASSERT(args[1]->IsBoolean());
+  RUNTIME_ASSERT(args[0]->IsNumber());
+  CONVERT_BOOLEAN_CHECKED(enable, args[1]);
 
-  // Update break point state
+  // If the number doesn't match an enum value, the ChangeBreakOnException
+  // function will default to affecting caught exceptions.
   ExceptionBreakType type =
       static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
-  bool enable = args[1]->ToBoolean()->IsTrue();
+  // Update break point state.
   Debug::ChangeBreakOnException(type, enable);
   return Heap::undefined_value();
 }
 
 
+// Returns the state of break on exceptions.
+// args[0]: Enum value indicating caught or uncaught exceptions.
+static Object* Runtime_IsBreakOnException(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsNumber());
+
+  ExceptionBreakType type =
+      static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+  bool result = Debug::IsBreakOnException(type);
+  return Smi::FromInt(result);
+}
+
+
 // Prepare for stepping
 // args[0]: break id for checking execution state
 // args[1]: step action from the enumeration StepAction
diff --git a/src/runtime.h b/src/runtime.h
index 8a3671a..68567fc 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -332,6 +332,7 @@
   F(SetScriptBreakPoint, 3, 1) \
   F(ClearBreakPoint, 1, 1) \
   F(ChangeBreakOnException, 2, 1) \
+  F(IsBreakOnException, 1, 1) \
   F(PrepareStep, 3, 1) \
   F(ClearStepping, 0, 1) \
   F(DebugEvaluate, 4, 1) \
diff --git a/src/scanner.cc b/src/scanner.cc
index 15b1d44..79d63f1 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -342,8 +342,11 @@
 // ----------------------------------------------------------------------------
 // Scanner
 
-Scanner::Scanner(ParserMode pre)
-    : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
+Scanner::Scanner()
+    : has_line_terminator_before_next_(false),
+      is_parsing_json_(false),
+      source_(NULL),
+      stack_overflow_(false) {}
 
 
 void Scanner::Initialize(Handle<String> source,
diff --git a/src/scanner.h b/src/scanner.h
index 8d61846..56760c3 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -281,8 +281,7 @@
     bool complete_;
   };
 
-  // Construction
-  explicit Scanner(ParserMode parse_mode);
+  Scanner();
 
   // Initialize the Scanner to scan source.
   void Initialize(Handle<String> source,
@@ -488,7 +487,6 @@
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
   bool has_line_terminator_before_next_;
-  bool is_pre_parsing_;
   bool is_parsing_json_;
 
   // Different UTF16 buffers used to pull characters from. Based on input one of
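
A Scanner is now default-constructed and bound to its input in a separate Initialize() step; pre-parse mode is no longer a constructor concern. A usage sketch (Initialize's trailing parameters are elided in this hunk, so they are elided here as well):

    Scanner scanner;                  // members start in a known default state
    scanner.Initialize(source, ...);  // bind to the input before calling Next()
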
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 34989d3..af7c0bd 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1227,7 +1227,7 @@
                                             String* fname) {
   ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
   switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name) \
+#define CALL_GENERATOR_CASE(ignored1, ignored2, name)           \
     case k##name##CallGenerator:                                \
       return CallStubCompiler::Compile##name##Call(object,      \
                                                    holder,      \
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 388bb52..c47cab7 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -370,13 +370,15 @@
                                                   Register prototype);
 
   // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called.  This ties the
-  // generated code to a particular context and so must not be used in
-  // cases where the generated code is not allowed to have references
-  // to objects from a context.
+  // context we were in when this function was called. If the context
+  // has changed, a jump to miss is performed. This ties the generated
+  // code to a particular context and so must not be used in cases
+  // where the generated code is not allowed to have references to
+  // objects from a context.
   static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                         int index,
-                                                        Register prototype);
+                                                        Register prototype,
+                                                        Label* miss);
 
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
                                        Register dst, Register src,
@@ -612,29 +614,25 @@
 // Installation of custom call generators for the selected builtins is
 // handled by the bootstrapper.
 //
-// Each entry has a name of a global function (lowercased), a flag
-// controlling whether the generator is set on the function itself or
-// on its instance prototype, a name of a builtin function on the
-// function or its instance prototype (the one the generator is set
-// for), and a name of a generator itself (used to build ids and
-// generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V)                          \
-  V(array, INSTANCE_PROTOTYPE, push, ArrayPush)               \
-  V(array, INSTANCE_PROTOTYPE, pop, ArrayPop)                 \
-  V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
-  V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt)         \
-  V(string, FUNCTION, fromCharCode, StringFromCharCode)
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype" (this controls whether the
+// generator is set on the object itself or, in case it's a function,
+// on its instance prototype), a name of a builtin function on the
+// object (the one the generator is set for), and a name of the
+// generator (used to build ids and generator function names).
+#define CUSTOM_CALL_IC_GENERATORS(V)                \
+  V(Array.prototype, push, ArrayPush)               \
+  V(Array.prototype, pop, ArrayPop)                 \
+  V(String.prototype, charCodeAt, StringCharCodeAt) \
+  V(String.prototype, charAt, StringCharAt)         \
+  V(String, fromCharCode, StringFromCharCode)       \
+  V(Math, floor, MathFloor)
 
 
 class CallStubCompiler: public StubCompiler {
  public:
-  enum CustomGeneratorOwner {
-    FUNCTION,
-    INSTANCE_PROTOTYPE
-  };
-
   enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR_ID(ignored1, ignored2, name) \
     k##name##CallGenerator,
     CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
 #undef DECLARE_CALL_GENERATOR_ID
@@ -673,11 +671,11 @@
                             JSFunction* function,
                             String* name);
 
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
-  Object* Compile##name##Call(Object* object,                      \
-                              JSObject* holder,                    \
-                              JSGlobalPropertyCell* cell,          \
-                              JSFunction* function,                \
+#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name)  \
+  Object* Compile##name##Call(Object* object,             \
+                              JSObject* holder,           \
+                              JSGlobalPropertyCell* cell, \
+                              JSFunction* function,       \
                               String* fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
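
For illustration, with the Math.floor entry added, the generator id enum now expands to roughly the following (sketch; kNumCallGenerators closes the enum in surrounding code this hunk does not show):

    enum {
      kArrayPushCallGenerator,
      kArrayPopCallGenerator,
      kStringCharCodeAtCallGenerator,
      kStringCharAtCallGenerator,
      kStringFromCharCodeCallGenerator,
      kMathFloorCallGenerator,
      kNumCallGenerators
    };
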
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 8c948cc..a8eb9d2 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -161,6 +161,8 @@
   SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                  \
   SC(named_load_global_inline, V8.NamedLoadGlobalInline)              \
   SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss)     \
+  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
+  SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)         \
   SC(keyed_store_field, V8.KeyedStoreField)                           \
   SC(keyed_store_inline, V8.KeyedStoreInline)                         \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
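
The two new counters are presumably bumped from the global-load stub the same way the existing inline-load counters are bumped from generated code, e.g.:

    __ IncrementCounter(&Counters::named_load_global_stub, 1);
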
diff --git a/src/version.cc b/src/version.cc
index c144ade..afc6ba2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     4
-#define BUILD_NUMBER      4
+#define BUILD_NUMBER      5
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index b480412..e9729ea 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1989,7 +1989,7 @@
   __ j(negative, &done);
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
-  __ Integer32ToSmi(rdi, rdi, &runtime);
+  __ Integer32ToSmi(rdi, rdi);
   // Store the smi value in the last match info.
   __ movq(FieldOperand(rbx,
                        rdx,
@@ -3343,7 +3343,7 @@
 
   // Look at the length of the result of adding the two strings.
   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
-  __ SmiAdd(rbx, rbx, rcx, NULL);
+  __ SmiAdd(rbx, rbx, rcx);
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
   __ SmiCompare(rbx, Smi::FromInt(2));
@@ -3803,7 +3803,7 @@
   __ movq(rdx, Operand(rsp, kFromOffset));
   __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
 
-  __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
+  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
   __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
   Label return_rax;
   __ j(equal, &return_rax);
@@ -3936,8 +3936,7 @@
   __ movq(scratch4, scratch1);
   __ SmiSub(scratch4,
             scratch4,
-            FieldOperand(right, String::kLengthOffset),
-            NULL);
+            FieldOperand(right, String::kLengthOffset));
   // Register scratch4 now holds left.length - right.length.
   const Register length_difference = scratch4;
   Label left_shorter;
@@ -3945,7 +3944,7 @@
   // The right string isn't longer than the left one.
   // Get the right string's length by subtracting the (non-negative) difference
   // from the left string's length.
-  __ SmiSub(scratch1, scratch1, length_difference, NULL);
+  __ SmiSub(scratch1, scratch1, length_difference);
   __ bind(&left_shorter);
   // Register scratch1 now holds Min(left.length, right.length).
   const Register min_length = scratch1;
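
These call sites now pick between two overloads: the checked form still takes a jump target, while the new unchecked form asserts (in debug builds) that overflow cannot happen. Sketch:

    __ SmiAdd(rbx, rbx, rcx);            // unchecked; debug-asserts no overflow
    __ SmiSub(rcx, rcx, rdx, &runtime);  // checked; jumps on a non-smi result
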
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fd26535..9c96047 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -35,18 +35,8 @@
 namespace internal {
 
 
-
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute the stack pointer.
-  Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-  // Fill in the state.
-  state->fp = fp;
-  state->sp = sp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
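
The platform-independent caller, presumably shared in src/frames.cc (not part of this hunk), would now look something like this, with only ComputeStackPointer left per-platform:

    StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp,
                                                        State* state) {
      if (fp == 0) return NONE;
      Address sp = ComputeStackPointer(fp);  // platform-specific hook above
      state->fp = fp;
      state->sp = sp;
      state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
      ASSERT(*state->pc_address != NULL);
      return EXIT;
    }
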
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index e4faafc..b357a9b 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -625,10 +625,7 @@
       __ pop(rdx);
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // Absence of a test rax instruction following the call
-      // indicates that none of the load was inlined.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
     }
   }
 }
@@ -941,8 +938,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
-  __ nop();  // Signal no inlined code.
+  EmitCallIC(ic, mode);
 }
 
 
@@ -1019,7 +1015,7 @@
                                                     slow));
           __ Move(rax, key_literal->handle());
           Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           __ jmp(done);
         }
       }
@@ -1043,11 +1039,7 @@
     __ Move(rcx, var->name());
     __ movq(rax, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    // A test rax instruction following the call is used by the IC to
-    // indicate that the inobject property case was inlined.  Ensure there
-    // is no test rax instruction here.
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
     Apply(context, rax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1110,10 +1102,7 @@
 
     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    // Notice: We must not have a "test rax, ..." instruction after the
-    // call. It is treated specially by the LoadIC code.
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     Apply(context, rax);
   }
 }
@@ -1212,8 +1201,7 @@
           __ Move(rcx, key->handle());
           __ movq(rdx, Operand(rsp, 0));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-          __ call(ic, RelocInfo::CODE_TARGET);
-          __ nop();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET);
           break;
         }
         // Fall through.
@@ -1425,16 +1413,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }
 
 
@@ -1553,8 +1539,7 @@
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1565,8 +1550,7 @@
       __ pop(rdx);
       __ pop(rax);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      __ nop();  // Signal no inlined code.
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
   }
@@ -1589,8 +1573,7 @@
     __ Move(rcx, var->name());
     __ movq(rdx, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ nop();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
     // Perform the assignment for non-const variables and for initialization
@@ -1674,8 +1657,7 @@
     __ pop(rdx);
   }
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1713,10 +1695,7 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // This nop signals to the IC that there is no inlined code at the call
-  // site for it to patch.
-  __ nop();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1765,7 +1744,7 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                          in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   Apply(context_, rax);
@@ -1789,7 +1768,7 @@
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
                                                               in_loop);
-  __ Call(ic, mode);
+  EmitCallIC(ic, mode);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   Apply(context_, rax);
@@ -1924,11 +1903,7 @@
         // Record source code position for IC call.
         SetSourcePosition(prop->position());
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        __ call(ic, RelocInfo::CODE_TARGET);
-        // By emitting a nop we make sure that we do not have a "test rax,..."
-        // instruction after the call as it is treated specially
-        // by the LoadIC code.
-        __ nop();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET);
         // Pop receiver.
         __ pop(rbx);
         // Push result (function).
@@ -2841,7 +2816,7 @@
     __ Move(rcx, expr->name());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
-    __ call(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3139,10 +3114,7 @@
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
@@ -3156,10 +3128,7 @@
       __ pop(rcx);
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // This nop signals to the IC that there is no inlined code at the call
-      // site for it to patch.
-      __ nop();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
@@ -3182,8 +3151,7 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ nop();  // Signal no inlined code.
+    EmitCallIC(ic, RelocInfo::CODE_TARGET);
     if (where == kStack) __ push(rax);
   } else if (proxy != NULL &&
              proxy->var()->slot() != NULL &&
@@ -3431,10 +3399,36 @@
 }
 
 
-Register FullCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() {
+  return rax;
+}
 
 
-Register FullCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() {
+  return rsi;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  __ call(ic, mode);
+
+  // If we're calling a (keyed) load or store stub, we have to mark
+  // the call as containing no inlined code so we will not attempt to
+  // patch it.
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+    case Code::KEYED_LOAD_IC:
+    case Code::STORE_IC:
+    case Code::KEYED_STORE_IC:
+      __ nop();  // Signals no inlined code.
+      break;
+    default:
+      // Do nothing.
+      break;
+  }
+}
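
The nop matters because, on this platform, the IC miss handler inspects the byte that follows the call to decide whether an inlined fast path exists at the call site. A sketch of that check, assumed from the comments above (kTestEaxByte and kCallTargetAddressOffset are the existing assembler constants):

    Address test_instruction_address =
        address + Assembler::kCallTargetAddressOffset;
    if (*test_instruction_address != Assembler::kTestEaxByte) {
      // The compiler emitted a nop: nothing was inlined here, so don't patch.
      return false;
    }
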
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index a74e621..62e7691 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -730,7 +730,6 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;
 
   Register receiver = rdx;
   Register index = rax;
@@ -745,7 +744,7 @@
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ ret(0);
@@ -753,10 +752,6 @@
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ ret(0);
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
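
The hunks below start replacing Label with NearLabel wherever the jump distance is provably short: NearLabel asserts that every jump to it fits in a signed 8-bit displacement, so the assembler can emit the two-byte short-jump encoding instead of the six-byte near form. The pattern, sketched:

    NearLabel done;
    __ testl(rdi, Immediate(0xFFFFFF00));
    __ j(zero, &done);  // jcc rel8 (2 bytes) rather than jcc rel32 (6 bytes)
    // ... a few instructions of fixup code ...
    __ bind(&done);
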
@@ -847,7 +842,7 @@
     // For the UnsignedInt array type, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
-    Label box_int;
+    NearLabel box_int;
 
     __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
 
@@ -1032,7 +1027,7 @@
   // No more bailouts to slow case on this path, so key not needed.
   __ SmiToInteger32(rdi, rax);
   {  // Clamp the value to [0..255].
-    Label done;
+    NearLabel done;
     __ testl(rdi, Immediate(0xFFFFFF00));
     __ j(zero, &done);
     __ setcc(negative, rdi);  // 1 if negative, 0 if positive.
@@ -1082,7 +1077,7 @@
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
-  Label non_smi_value;
+  NearLabel non_smi_value;
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
   __ JumpIfNotSmi(rax, &non_smi_value);
@@ -1104,7 +1099,7 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, check_heap_number;
+  Label slow;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow);
@@ -1145,6 +1140,7 @@
   // rdx: receiver (a JSObject)
   // rbx: elements array
   // rdi: untagged key
+  NearLabel check_heap_number;
   __ JumpIfNotSmi(rax, &check_heap_number);
   // No more branches to slow case on this path.  Key and receiver not needed.
   __ SmiToInteger32(rdx, rax);
@@ -1488,7 +1484,7 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label do_call, slow_call, slow_load;
   Label check_number_dictionary, check_string, lookup_monomorphic_cache;
   Label index_smi, index_string;
 
@@ -1730,6 +1726,14 @@
 }
 
 
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(<bug#>): implement this.
+  return false;
+}
+
+
 // The offset from the inlined patch site to the start of the inlined
 // store instruction.
 const int StoreIC::kOffsetToStoreInstruction = 20;
@@ -1880,7 +1884,7 @@
   //  -- rsp[0] : return address
   // -----------------------------------
 
-  Label miss, restore_miss;
+  Label miss;
 
   GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 165c51d..d62bed4 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -85,7 +85,7 @@
                                        Register scratch) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
-    Label not_in_new_space;
+    NearLabel not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
@@ -171,7 +171,7 @@
   Label done;
 
   if (FLAG_debug_code) {
-    Label okay;
+    NearLabel okay;
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
@@ -221,42 +221,6 @@
   }
 }
 
-
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch) {
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      movq(kScratchRegister, ExternalReference::new_space_mask());
-      and_(scratch, kScratchRegister);
-    } else {
-      movq(scratch, ExternalReference::new_space_mask());
-      and_(scratch, object);
-    }
-    movq(kScratchRegister, ExternalReference::new_space_start());
-    cmpq(scratch, kScratchRegister);
-    j(cc, branch);
-  } else {
-    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
-    if (scratch.is(object)) {
-      addq(scratch, kScratchRegister);
-    } else {
-      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
-    j(cc, branch);
-  }
-}
-
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (FLAG_debug_code) Check(cc, msg);
 }
@@ -264,7 +228,7 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (FLAG_debug_code) {
-    Label ok;
+    NearLabel ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
     j(equal, &ok);
@@ -278,7 +242,7 @@
 
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
-  Label L;
+  NearLabel L;
   j(cc, &L);
   Abort(msg);
   // will not return here
@@ -291,7 +255,7 @@
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
-    Label alignment_as_expected;
+    NearLabel alignment_as_expected;
     testq(rsp, Immediate(frame_alignment_mask));
     j(zero, &alignment_as_expected);
     // Abort if stack is not aligned.
@@ -304,7 +268,7 @@
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
-  Label ok;
+  NearLabel ok;
   testl(result, result);
   j(not_zero, &ok);
   testl(op, op);
@@ -642,8 +606,6 @@
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
 
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
 Register MacroAssembler::GetSmiConstant(Smi* source) {
   int value = source->value();
   if (value == 0) {
@@ -666,7 +628,7 @@
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
-      Label ok;
+      NearLabel ok;
       j(equal, &ok);
       int3();
       bind(&ok);
@@ -716,6 +678,7 @@
   }
 }
 
+
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
@@ -725,22 +688,10 @@
 }
 
 
-void MacroAssembler::Integer32ToSmi(Register dst,
-                                    Register src,
-                                    Label* on_overflow) {
-  ASSERT_EQ(0, kSmiTag);
-  // 32-bit integer always fits in a long smi.
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  shl(dst, Immediate(kSmiShift));
-}
-
-
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (FLAG_debug_code) {
     testb(dst, Immediate(0x01));
-    Label ok;
+    NearLabel ok;
     j(zero, &ok);
     if (allow_stub_calls()) {
       Abort("Integer32ToSmiField writing to non-smi location");
@@ -949,180 +900,6 @@
 }
 
 
-void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    movq(kScratchRegister, src);
-    neg(dst);  // Low 32 bits are retained as zero by negation.
-    // Test if result is zero or Smi::kMinValue.
-    cmpq(dst, kScratchRegister);
-    j(not_equal, on_smi_result);
-    movq(src, kScratchRegister);
-  } else {
-    movq(dst, src);
-    neg(dst);
-    cmpq(dst, src);
-    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
-    j(not_equal, on_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible.
-    if (dst.is(src1)) {
-      addq(dst, src2);
-    } else {
-      movq(dst, src1);
-      addq(dst, src2);
-    }
-    Assert(no_overflow, "Smi addition overflow");
-  } else if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible (e.g., subtracting two positive smis).
-    if (dst.is(src1)) {
-      subq(dst, src2);
-    } else {
-      movq(dst, src1);
-      subq(dst, src2);
-    }
-    Assert(no_overflow, "Smi subtraction overflow");
-  } else if (dst.is(src1)) {
-    cmpq(dst, src2);
-    j(overflow, on_not_smi_result);
-    subq(dst, src2);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            Label* on_not_smi_result) {
-  if (on_not_smi_result == NULL) {
-    // No overflow checking. Use only when it's known that
-    // overflowing is impossible (e.g., subtracting two positive smis).
-    if (dst.is(src1)) {
-      subq(dst, src2);
-    } else {
-      movq(dst, src1);
-      subq(dst, src2);
-    }
-    Assert(no_overflow, "Smi subtraction overflow");
-  } else if (dst.is(src1)) {
-    movq(kScratchRegister, src2);
-    cmpq(src1, kScratchRegister);
-    j(overflow, on_not_smi_result);
-    subq(src1, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-
-  if (dst.is(src1)) {
-    Label failure, zero_correct_result;
-    movq(kScratchRegister, src1);  // Create backup for later testing.
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, &failure);
-
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    Label correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-
-    movq(dst, kScratchRegister);
-    xor_(dst, src2);
-    j(positive, &zero_correct_result);  // Result was positive zero.
-
-    bind(&failure);  // Reused failure exit, restores src1.
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-
-    bind(&zero_correct_result);
-    xor_(dst, dst);
-
-    bind(&correct_result);
-  } else {
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, on_not_smi_result);
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    Label correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-    // One of src1 and src2 is zero, the check whether the other is
-    // negative.
-    movq(kScratchRegister, src1);
-    xor_(kScratchRegister, src2);
-    j(negative, on_not_smi_result);
-    bind(&correct_result);
-  }
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       Label* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src.is(kScratchRegister));
-
-  JumpIfNotSmi(src, on_not_smi_result);
-  Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  LoadSmiConstant(tmp, constant);
-  addq(tmp, src);
-  j(overflow, on_not_smi_result);
-  if (dst.is(src)) {
-    movq(dst, tmp);
-  }
-}
-
-
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1179,29 +956,6 @@
 }
 
 
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-
-    LoadSmiConstant(kScratchRegister, constant);
-    addq(kScratchRegister, src);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    LoadSmiConstant(dst, constant);
-    addq(dst, src);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1226,165 +980,48 @@
 }
 
 
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result);
-      movq(dst, kScratchRegister);
-    }
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible.
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    addq(dst, src2);
   } else {
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
-      addq(dst, src);
-      j(overflow, on_not_smi_result);
-    }
+    movq(dst, src1);
+    addq(dst, src2);
   }
+  Assert(no_overflow, "Smi addition overflow");
 }
 
 
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  Label positive_divisor;
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  // We need to rule out dividing Smi::kMinValue by -1, since that would
-  // overflow in idiv and raise an exception.
-  // We combine this with negative zero test (negative zero only happens
-  // when dividing zero by a negative number).
-
-  // We overshoot a little and go to slow case if we divide min-value
-  // by any negative value, not just -1.
-  Label safe_div;
-  testl(rax, Immediate(0x7fffffff));
-  j(not_zero, &safe_div);
-  testq(src2, src2);
-  if (src1.is(rax)) {
-    j(positive, &safe_div);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible (e.g., subtracting two positive smis).
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    subq(dst, src2);
   } else {
-    j(negative, on_not_smi_result);
+    movq(dst, src1);
+    subq(dst, src2);
   }
-  bind(&safe_div);
-
-  SmiToInteger32(src2, src2);
-  // Sign extend src1 into edx:eax.
-  cdq();
-  idivl(src2);
-  Integer32ToSmi(src2, src2);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  if (src1.is(rax)) {
-    Label smi_result;
-    j(zero, &smi_result);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  } else {
-    j(not_zero, on_not_smi_result);
-  }
-  if (!dst.is(src1) && src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  Integer32ToSmi(dst, rax);
+  Assert(no_overflow, "Smi subtraction overflow");
 }
 
 
-void MacroAssembler::SmiMod(Register dst,
+void MacroAssembler::SmiSub(Register dst,
                             Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-  ASSERT(!src1.is(src2));
-
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
+                            const Operand& src2) {
+  // No overflow checking. Use only when it's known that
+  // overflowing is impossible (e.g., subtracting two positive smis).
+  if (dst.is(src1)) {
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
   }
-  SmiToInteger32(rax, src1);
-  SmiToInteger32(src2, src2);
-
-  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
-  Label safe_div;
-  cmpl(rax, Immediate(Smi::kMinValue));
-  j(not_equal, &safe_div);
-  cmpl(src2, Immediate(-1));
-  j(not_equal, &safe_div);
-  // Retag inputs and go slow case.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  jmp(on_not_smi_result);
-  bind(&safe_div);
-
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Restore smi tags on inputs.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, go slow to return a floating point negative zero.
-  Label smi_result;
-  testl(rdx, rdx);
-  j(not_zero, &smi_result);
-  testq(src1, src1);
-  j(negative, on_not_smi_result);
-  bind(&smi_result);
-  Integer32ToSmi(dst, rdx);
+  Assert(no_overflow, "Smi subtraction overflow");
 }
 
 
@@ -1480,25 +1117,6 @@
 }
 
 
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
-                                                  Register src,
-                                                  int shift_value,
-                                                  Label* on_not_smi_result) {
-  // Logic right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movq(dst, src);
-    if (shift_value == 0) {
-      testq(dst, dst);
-      j(negative, on_not_smi_result);
-    }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
-  }
-}
-
-
 void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                           Register src,
                                           int shift_value) {
@@ -1515,7 +1133,7 @@
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
-  Label result_ok;
+  NearLabel result_ok;
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
@@ -1527,42 +1145,6 @@
 }
 
 
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  if (src1.is(rcx) || src2.is(rcx)) {
-    movq(kScratchRegister, rcx);
-  }
-  if (!dst.is(src1)) {
-    movq(dst, src1);
-  }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
-  testq(dst, dst);
-  if (src1.is(rcx) || src2.is(rcx)) {
-    Label positive_result;
-    j(positive, &positive_result);
-    if (src1.is(rcx)) {
-      movq(src1, kScratchRegister);
-    } else {
-      movq(src2, kScratchRegister);
-    }
-    jmp(on_not_smi_result);
-    bind(&positive_result);
-  } else {
-    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
-  }
-}
-
-
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
@@ -1590,44 +1172,6 @@
 }
 
 
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smis) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {  // Check contains a stub call.
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movl(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  // If non-zero then both are smis.
-  j(not_zero, on_not_smis);
-
-  // Exactly one operand is a smi.
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1663,138 +1207,6 @@
 }
 
 
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  Condition smi = CheckSmi(src);
-  j(smi, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
-  Condition smi = CheckSmi(src);
-  j(NegateCondition(smi), on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
-                                          Label* on_not_positive_smi) {
-  Condition positive_smi = CheckPositiveSmi(src);
-  j(NegateCondition(positive_smi), on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             Smi* constant,
-                                             Label* on_equals) {
-  SmiCompare(src, constant);
-  j(equal, on_equals);
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
-                                                Label* on_invalid) {
-  Condition is_valid = CheckUInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
-                                      Label* on_not_both_smi) {
-  Condition both_smi = CheckBothSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
-                                              Label* on_not_both_smi) {
-  Condition both_smi = CheckBothPositiveSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                                         Register second_object,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* on_fail) {
-  // Check that both objects are not smis.
-  Condition either_smi = CheckEitherSmi(first_object, second_object);
-  j(either_smi, on_fail);
-
-  // Load instance type for both strings.
-  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
-  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
-  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
-  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    Label *failure) {
-  if (!scratch.is(instance_type)) {
-    movl(scratch, instance_type);
-  }
-
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
-  andl(scratch, Immediate(kFlatAsciiStringMask));
-  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-  j(not_equal, failure);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first_object_instance_type,
-    Register second_object_instance_type,
-    Register scratch1,
-    Register scratch2,
-    Label* on_fail) {
-  // Load instance type for both strings.
-  movq(scratch1, first_object_instance_type);
-  movq(scratch2, second_object_instance_type);
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
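
These Label-only definitions are not gone: per the header changes below, they presumably move into macro-assembler-x64.h as inline templates parameterized on the label type, e.g.:

    template <typename LabelType>
    void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
      ASSERT_EQ(0, kSmiTag);
      Condition smi = CheckSmi(src);
      j(smi, on_smi);
    }
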
-
-
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
@@ -1903,7 +1315,6 @@
 
 void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  WriteRecordedPositions();
   call(code_object, rmode);
 }
 
@@ -1994,7 +1405,7 @@
 
 
 void MacroAssembler::AbortIfNotNumber(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   j(is_smi, &ok);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
@@ -2005,14 +1416,14 @@
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(NegateCondition(is_smi), "Operand is a smi");
 }
 
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
-  Label ok;
+  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(is_smi, "Operand is not a smi");
 }
@@ -2052,7 +1463,7 @@
   j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
-  Label non_instance;
+  NearLabel non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
         Immediate(1 << Map::kHasNonInstancePrototype));
   j(not_zero, &non_instance);
@@ -2068,7 +1479,7 @@
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
-  Label done;
+  NearLabel done;
   CmpObjectType(result, MAP_TYPE, kScratchRegister);
   j(not_equal, &done);
 
@@ -2133,76 +1544,11 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
-                                    const ParameterCount& actual,
-                                    Handle<Code> code_constant,
-                                    Register code_register,
-                                    Label* done,
-                                    InvokeFlag flag) {
-  bool definitely_matches = false;
-  Label invoke;
-  if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
-    if (expected.immediate() == actual.immediate()) {
-      definitely_matches = true;
-    } else {
-      Set(rax, actual.immediate());
-      if (expected.immediate() ==
-              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
-        // Don't worry about adapting arguments for built-ins that
-        // don't want that done. Skip adaption code by making it look
-        // like we have a match between expected and actual number of
-        // arguments.
-        definitely_matches = true;
-      } else {
-        Set(rbx, expected.immediate());
-      }
-    }
-  } else {
-    if (actual.is_immediate()) {
-      // Expected is in register, actual is immediate. This is the
-      // case when we invoke function values without going through the
-      // IC mechanism.
-      cmpq(expected.reg(), Immediate(actual.immediate()));
-      j(equal, &invoke);
-      ASSERT(expected.reg().is(rbx));
-      Set(rax, actual.immediate());
-    } else if (!expected.reg().is(actual.reg())) {
-      // Both expected and actual are in (different) registers. This
-      // is the case when we invoke functions using call and apply.
-      cmpq(expected.reg(), actual.reg());
-      j(equal, &invoke);
-      ASSERT(actual.reg().is(rax));
-      ASSERT(expected.reg().is(rbx));
-    }
-  }
-
-  if (!definitely_matches) {
-    Handle<Code> adaptor =
-        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
-    if (!code_constant.is_null()) {
-      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
-      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    } else if (!code_register.is(rdx)) {
-      movq(rdx, code_register);
-    }
-
-    if (flag == CALL_FUNCTION) {
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      jmp(done);
-    } else {
-      Jump(adaptor, RelocInfo::CODE_TARGET);
-    }
-    bind(&invoke);
-  }
-}
-
-
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag) {
-  Label done;
+  NearLabel done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
   if (flag == CALL_FUNCTION) {
     call(code);
@@ -2219,7 +1565,7 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag) {
-  Label done;
+  NearLabel done;
   Register dummy = rax;
   InvokePrologue(expected, actual, code, dummy, &done, flag);
   if (flag == CALL_FUNCTION) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9f5a746..503c7f2 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -91,10 +91,11 @@
   // Check if object is in new space. The condition cc can be equal or
   // not_equal. If it is equal a jump will be done if the object is on new
   // space. The register scratch can be object itself, but it will be clobbered.
+  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,
-                  Label* branch);
+                  LabelType* branch);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -215,14 +216,9 @@
 
   // Tag an integer value. The result must be known to be a valid smi value.
   // Only uses the low 32 bits of the src register. Sets the N and Z flags
-  // based on the value of the resulting integer.
+  // based on the value of the resulting smi.
   void Integer32ToSmi(Register dst, Register src);
 
-  // Tag an integer value if possible, or jump the integer value cannot be
-  // represented as a smi. Only uses the low 32 bit of the src registers.
-  // NOTICE: Destroys the dst register even if unsuccessful!
-  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
-
   // Stores an integer32 value into a memory field that already holds a smi.
   void Integer32ToSmiField(const Operand& dst, Register src);
 
@@ -300,30 +296,42 @@
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
-  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+  template <typename LabelType>
+  void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
 
   // Jump if the unsigned integer value cannot be represented by a smi.
-  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+  template <typename LabelType>
+  void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
 
   // Jump to label if the value is a tagged smi.
-  void JumpIfSmi(Register src, Label* on_smi);
+  template <typename LabelType>
+  void JumpIfSmi(Register src, LabelType* on_smi);
 
   // Jump to label if the value is not a tagged smi.
-  void JumpIfNotSmi(Register src, Label* on_not_smi);
+  template <typename LabelType>
+  void JumpIfNotSmi(Register src, LabelType* on_not_smi);
 
   // Jump to label if the value is not a positive tagged smi.
-  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
+  template <typename LabelType>
+  void JumpIfNotPositiveSmi(Register src, LabelType* on_not_smi);
 
   // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  void JumpIfSmiEqualsConstant(Register src,  Smi* constant, Label* on_equals);
+  template <typename LabelType>
+  void JumpIfSmiEqualsConstant(Register src,
+                               Smi* constant,
+                               LabelType* on_equals);
 
   // Jump if either or both register are not smi values.
-  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+  template <typename LabelType>
+  void JumpIfNotBothSmi(Register src1,
+                        Register src2,
+                        LabelType* on_not_both_smi);
 
   // Jump if either or both register are not positive smi values.
+  template <typename LabelType>
   void JumpIfNotBothPositiveSmi(Register src1, Register src2,
-                                Label* on_not_both_smi);
+                                LabelType* on_not_both_smi);
 
   // Operations on tagged smi values.
 
@@ -333,10 +341,11 @@
   // Optimistically adds an integer constant to a supposed smi.
   // If the src is not a smi, or the result is not a smi, jump to
   // the label.
+  template <typename LabelType>
   void SmiTryAddConstant(Register dst,
                          Register src,
                          Smi* constant,
-                         Label* on_not_smi_result);
+                         LabelType* on_not_smi_result);
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result.
   // No overflow testing on the result is done.
@@ -348,10 +357,11 @@
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
+  template <typename LabelType>
   void SmiAddConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result);
+                      LabelType* on_not_smi_result);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result. No testing on the result is done. Sets the N and Z flags
@@ -360,60 +370,80 @@
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
+  template <typename LabelType>
   void SmiSubConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      Label* on_not_smi_result);
+                      LabelType* on_not_smi_result);
 
   // Negating a smi can give a negative zero or a too-large positive value.
   // NOTICE: This operation jumps on success, not failure!
+  template <typename LabelType>
   void SmiNeg(Register dst,
               Register src,
-              Label* on_smi_result);
+              LabelType* on_smi_result);
 
   // Adds smi values and returns the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiAdd(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
+
+  void SmiAdd(Register dst,
+              Register src1,
+              Register src2);
 
   // Subtracts smi values and returns the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   void SmiSub(Register dst,
               Register src1,
+              Register src2);
+
+  template <typename LabelType>
+  void SmiSub(Register dst,
+              Register src1,
               const Operand& src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
+
+  void SmiSub(Register dst,
+              Register src1,
+              const Operand& src2);
 
   // Multiplies smi values and returns the result as a smi,
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
+  template <typename LabelType>
   void SmiMul(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Divides one smi by another and returns the quotient.
   // Clobbers rax and rdx registers.
+  template <typename LabelType>
   void SmiDiv(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Divides one smi by another and returns the remainder.
   // Clobbers rax and rdx registers.
+  template <typename LabelType>
   void SmiMod(Register dst,
               Register src1,
               Register src2,
-              Label* on_not_smi_result);
+              LabelType* on_not_smi_result);
 
   // Bitwise operations.
   void SmiNot(Register dst, Register src);
@@ -427,10 +457,11 @@
   void SmiShiftLeftConstant(Register dst,
                             Register src,
                             int shift_value);
+  template <typename LabelType>
   void SmiShiftLogicalRightConstant(Register dst,
                                   Register src,
                                   int shift_value,
-                                  Label* on_not_smi_result);
+                                  LabelType* on_not_smi_result);
   void SmiShiftArithmeticRightConstant(Register dst,
                                        Register src,
                                        int shift_value);
@@ -443,10 +474,11 @@
   // Shifts a smi value to the right, shifting in zero bits at the top, and
   // returns the unsigned interpretation of the result if that is a smi.
   // Uses and clobbers rcx, so dst may not be rcx.
+  template <typename LabelType>
   void SmiShiftLogicalRight(Register dst,
-                          Register src1,
-                          Register src2,
-                          Label* on_not_smi_result);
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result);
   // Shifts a smi value to the right, sign extending the top, and
   // returns the signed interpretation of the result. That will always
   // be a valid smi value, since it's numerically smaller than the
@@ -460,10 +492,11 @@
 
   // Select the non-smi register of two registers where exactly one is a
   // smi. If neither are smis, jump to the failure label.
+  template <typename LabelType>
   void SelectNonSmi(Register dst,
                     Register src1,
                     Register src2,
-                    Label* on_not_smis);
+                    LabelType* on_not_smis);
 
   // Converts, if necessary, a smi to a combination of number and
   // multiplier to be used as a scaled index.
@@ -493,25 +526,29 @@
 
   // ---------------------------------------------------------------------------
   // String macros.
+  template <typename LabelType>
   void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                            Register second_object,
                                            Register scratch1,
                                            Register scratch2,
-                                           Label* on_not_both_flat_ascii);
+                                           LabelType* on_not_both_flat_ascii);
 
   // Check whether the instance type represents a flat ascii string. Jump to the
   // label if not. If the instance type can be scratched, specify the same
   // register for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
-                                              Register scratch,
-                                              Label *on_not_flat_ascii_string);
+  template <typename LabelType>
+  void JumpIfInstanceTypeIsNotSequentialAscii(
+      Register instance_type,
+      Register scratch,
+      LabelType *on_not_flat_ascii_string);
 
+  template <typename LabelType>
   void JumpIfBothInstanceTypesAreNotSequentialAscii(
       Register first_object_instance_type,
       Register second_object_instance_type,
       Register scratch1,
       Register scratch2,
-      Label* on_fail);
+      LabelType* on_fail);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -865,11 +902,12 @@
   Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
+  template <typename LabelType>
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       Register code_register,
-                      Label* done,
+                      LabelType* done,
                       InvokeFlag flag);
 
   // Activation support.
@@ -961,6 +999,697 @@
 #define ACCESS_MASM(masm) masm->
 #endif
 
+// -----------------------------------------------------------------------------
+// Template implementations.
+
+static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+
+template <typename LabelType>
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            LabelType* on_smi_result) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result);
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            const Operand& src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
+    j(overflow, on_not_smi_result);
+    subq(src1, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+
+  if (dst.is(src1)) {
+    NearLabel failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    NearLabel correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    j(positive, &zero_correct_result);  // Result was positive zero.
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+
+    bind(&zero_correct_result);
+    xor_(dst, dst);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    NearLabel correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+    // One of src1 and src2 is zero; check whether the other one is
+    // negative.
+    movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result);
+    bind(&correct_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       Smi* constant,
+                                       LabelType* on_not_smi_result) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+
+  JumpIfNotSmi(src, on_not_smi_result);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  LoadSmiConstant(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    LabelType* on_not_smi_result) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
+  } else {
+    LoadSmiConstant(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    LabelType* on_not_smi_result) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result);
+      movq(dst, kScratchRegister);
+    }
+  } else {
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result, it only
+      // differs on the overflow bit, which we don't check here.
+      addq(dst, src);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result);
+    }
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  NearLabel positive_divisor;
+  testq(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
+
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  NearLabel safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+  } else {
+    j(negative, on_not_smi_result);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend eax (which holds src1's value) into edx:eax.
+  cdq();
+  idivl(src2);
+  Integer32ToSmi(src2, src2);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  if (src1.is(rax)) {
+    NearLabel smi_result;
+    j(zero, &smi_result);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  Integer32ToSmi(dst, rax);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
+
+  testq(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  NearLabel safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result);
+  bind(&safe_div);
+
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  NearLabel smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result);
+  testq(src1, src1);
+  j(negative, on_not_smi_result);
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRightConstant(
+    Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
+      j(negative, on_not_smi_result);
+    }
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          LabelType* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  NearLabel result_ok;
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Shift amount is (rcx & 0x1f) + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    NearLabel positive_result;
+    j(positive, &positive_result);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result);
+    bind(&positive_result);
+  } else {
+    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  LabelType* on_not_smis) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // The operands must not both be smis.
+#ifdef DEBUG
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  // If non-zero, then neither operand is a smi.
+  j(not_zero, on_not_smis);
+
+  // Exactly one operand is a smi.
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister holds src1 & kSmiTagMask, which is either zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          LabelType* on_not_positive_smi) {
+  Condition positive_smi = CheckPositiveSmi(src);
+  j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             LabelType* on_equals) {
+  SmiCompare(src, constant);
+  j(equal, on_equals);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+                                            LabelType* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                LabelType* on_invalid) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      LabelType* on_not_both_smi) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1,
+                                              Register src2,
+                                              LabelType* on_not_both_smi) {
+  Condition both_smi = CheckBothPositiveSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+                                                         Register second_object,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         LabelType* on_fail) {
+  // Check that both objects are not smis.
+  Condition either_smi = CheckEitherSmi(first_object, second_object);
+  j(either_smi, on_fail);
+
+  // Load instance type for both strings.
+  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    LabelType *failure) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    LabelType* on_fail) {
+  // Load instance type for both strings.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                LabelType* branch) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask());
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask());
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start());
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+    j(cc, branch);
+  }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    LabelType* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  NearLabel invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      Set(rax, actual.immediate());
+      if (expected.immediate() ==
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        Set(rbx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(rbx));
+      Set(rax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      jmp(done);
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
+  }
+}
+
 
 } }  // namespace v8::internal
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 765a90c..75956eb 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -216,7 +216,12 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ Move(prototype, Top::global());
+  __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)),
+          prototype);
+  __ j(not_equal, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -964,7 +969,7 @@
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, rax);
+            masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -983,7 +988,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, rax);
+            masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -1004,7 +1009,7 @@
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, rax);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                         rbx, rdx, rdi, name, &miss);
       }
@@ -1358,7 +1363,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            rax);
+                                            rax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                   rbx, rdx, rdi, name, &miss);
@@ -1429,7 +1435,8 @@
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            rax);
+                                            rax,
+                                            &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                   rbx, rdx, rdi, name, &miss);
@@ -1541,6 +1548,16 @@
 }
 
 
+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // TODO(872): implement this.
+  return Heap::undefined_value();
+}
+
+
 Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
@@ -1845,12 +1862,12 @@
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ movq(rax, rbx);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
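
Note on the GenerateDirectLoadGlobalFunctionPrototype change above: the
helper now takes a miss label and bails out to it when the global object of
the current context (loaded via rsi) no longer matches Top::global(), so a
prototype cached for one context can no longer be used from another. The
resulting calling pattern, condensed from the patched call sites above (a
sketch, not compilable on its own):

    Label miss;
    // ... receiver type checks that also jump to &miss ...
    GenerateDirectLoadGlobalFunctionPrototype(
        masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
    // rax holds the prototype only if the context check passed.
    CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
                    rbx, rdx, rdi, name, &miss);
    // ...
    __ bind(&miss);
    GenerateLoadMiss(masm(), Code::LOAD_IC);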