Update V8 to r7427: Initial merge by git

As required by WebKit r82507
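
Most of this merge is the mechanical half of upstream's multi-isolate
refactor: process-wide statics such as Factory::, Heap::, Top::,
Builtins::builtin(), StubCache:: and CpuFeatures::IsSupported() become
state on the current Isolate, reached via Isolate::Current() or the
ISOLATE / FACTORY / HEAP shortcut macros. The sketch below is only a
toy model of that pattern; the classes and names are invented for
illustration and are not V8's actual declarations.

    // Toy model of the singleton -> per-isolate migration (hypothetical
    // names; only the shape of the change mirrors the patch below).
    #include <iostream>

    class Counters {
     public:
      void Increment(const char* name) { std::cout << name << "++\n"; }
    };

    class Isolate {
     public:
      // Before: a class of statics, e.g. Counters::keyed_store_inline,
      // shared by every VM in the process. After: each Isolate owns its
      // own Counters, so independent VMs no longer race on them.
      Counters* counters() { return &counters_; }
      static Isolate* Current() { return &current_; }
     private:
      Counters counters_;
      static Isolate current_;
    };

    Isolate Isolate::current_;

    // Shortcut macro in the spirit of the ISOLATE/FACTORY/HEAP macros
    // used throughout the rewritten code below.
    #define ISOLATE (Isolate::Current())

    int main() {
      ISOLATE->counters()->Increment("keyed_store_inline");
      return 0;
    }

The non-mechanical changes are the strict-mode pieces (no shadow
arguments variable, ArgumentsAccessStub::NEW_STRICT, a strict-mode
FastNewClosureStub) and a keyed-store fix that performs the array
bounds check only after the fast-elements map check.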

Change-Id: I7ae83ef3f689356043b4929255b7c1dd31d8c5df
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index d32b009..91c4747 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -132,8 +132,6 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
-int CodeGenerator::inlined_write_barrier_size_ = -1;
-
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
@@ -307,7 +305,7 @@
     if (!scope()->HasIllegalRedeclaration()) {
       Comment cmnt(masm_, "[ function body");
 #ifdef DEBUG
-      bool is_builtin = Bootstrapper::IsActive();
+      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
       bool should_trace =
           is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
       if (should_trace) {
@@ -577,11 +575,13 @@
 
 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
   if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-  ASSERT(scope()->arguments_shadow() != NULL);
+
+  // In strict mode there is no need for shadow arguments.
+  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
   // We don't want to do lazy arguments allocation for functions that
   // have heap-allocated contexts, because it interferes with the
   // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0)
+  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
       ? EAGER_ARGUMENTS_ALLOCATION
       : LAZY_ARGUMENTS_ALLOCATION;
 }
@@ -599,7 +599,9 @@
     frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
   } else {
     frame_->SpillAll();
-    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    ArgumentsAccessStub stub(is_strict_mode()
+        ? ArgumentsAccessStub::NEW_STRICT
+        : ArgumentsAccessStub::NEW_NON_STRICT);
     __ ldr(r2, frame_->Function());
     // The receiver is below the arguments, the return address, and the
     // frame pointer on the stack.
@@ -615,7 +617,9 @@
   Variable* arguments = scope()->arguments();
   Variable* shadow = scope()->arguments_shadow();
   ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
+  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
+         scope()->is_strict_mode());
+
   JumpTarget done;
   if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
     // We have to skip storing into the arguments slot if it has
@@ -629,7 +633,9 @@
   }
   StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
   if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
+  if (shadow != NULL) {
+    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
+  }
 }
 
 
@@ -764,7 +770,7 @@
     true_target->Branch(eq);
 
     // Slow case.
-    if (CpuFeatures::IsSupported(VFP3)) {
+    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       // Implements the slow case by using ToBooleanStub.
       // The ToBooleanStub takes a single argument, and
@@ -961,7 +967,8 @@
 void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
   ASSERT(Token::IsBitOp(op_));
 
-  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
+  if ((op_ == Token::SHR) &&
+      !Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     // >>> requires an unsigned to double conversion and the non VFP code
     // does not support this conversion.
     __ b(cond, entry_label());
@@ -1065,7 +1072,7 @@
 void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
                                                    Register heap_number,
                                                    Register scratch) {
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, answer);
     if (op_ == Token::SHR) {
@@ -1135,7 +1142,7 @@
         // SHR is special because it is required to produce a positive answer.
         __ cmp(int32, Operand(0, RelocInfo::NONE));
       }
-      if (CpuFeatures::IsSupported(VFP3)) {
+      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
         __ b(mi, &result_not_a_smi);
       } else {
         // Non VFP code cannot convert from unsigned to double, so fall back
@@ -1153,7 +1160,7 @@
   }
   // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
   // if the shift is more than 0 or SHR if the shift is more than 1.
-  if (!( (op_ == Token::AND) ||
+  if (!( (op_ == Token::AND && value_ >= 0) ||
         ((op_ == Token::SAR) && (shift_value > 0)) ||
         ((op_ == Token::SHR) && (shift_value > 1)))) {
     __ add(r3, int32, Operand(0x40000000), SetCC);
@@ -1414,8 +1421,10 @@
           default: UNREACHABLE();
         }
         deferred->BindExit();
-        TypeInfo result_type =
-            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+        TypeInfo result_type = TypeInfo::Integer32();
+        if (op == Token::BIT_AND && int_value >= 0) {
+          result_type = TypeInfo::Smi();
+        }
         frame_->EmitPush(tos, result_type);
       }
       break;
@@ -1714,7 +1723,7 @@
   // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
   Load(applicand);
-  Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
   frame_->Dup();
   frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
   frame_->EmitPush(r0);
@@ -1777,7 +1786,8 @@
   __ JumpIfSmi(r0, &build_args);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
   __ b(ne, &build_args);
-  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+  Handle<Code> apply_code(
+      Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
   __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
   __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ cmp(r1, Operand(apply_code));
@@ -1992,7 +2002,7 @@
   // If we have a function or a constant, we need to initialize the variable.
   Expression* val = NULL;
   if (node->mode() == Variable::CONST) {
-    val = new Literal(Factory::the_hole_value());
+    val = new Literal(FACTORY->the_hole_value());
   } else {
     val = node->fun();  // NULL if we don't have a function
   }
@@ -2849,7 +2859,7 @@
   function_return_is_shadowed_ = function_return_was_shadowed;
 
   // Get an external reference to the handler address.
-  ExternalReference handler_address(Top::k_handler_address);
+  ExternalReference handler_address(Isolate::k_handler_address, isolate());
 
   // If we can fall off the end of the try block, unlink from try chain.
   if (has_valid_frame()) {
@@ -2965,7 +2975,7 @@
   function_return_is_shadowed_ = function_return_was_shadowed;
 
   // Get an external reference to the handler address.
-  ExternalReference handler_address(Top::k_handler_address);
+  ExternalReference handler_address(Isolate::k_handler_address, isolate());
 
   // If we can fall off the end of the try block, unlink from the try
   // chain and set the state on the frame to FALLING.
@@ -3106,10 +3116,11 @@
     bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literal cloning.
-  if (scope()->is_function_scope() &&
-      function_info->num_literals() == 0 &&
-      !pretenure) {
-    FastNewClosureStub stub;
+  if (!pretenure &&
+      scope()->is_function_scope() &&
+      function_info->num_literals() == 0) {
+    FastNewClosureStub stub(
+        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
     frame_->EmitPush(Operand(function_info));
     frame_->SpillAll();
     frame_->CallStub(&stub, 1);
@@ -3119,8 +3130,8 @@
     frame_->EmitPush(cp);
     frame_->EmitPush(Operand(function_info));
     frame_->EmitPush(Operand(pretenure
-                             ? Factory::true_value()
-                             : Factory::false_value()));
+                             ? FACTORY->true_value()
+                             : FACTORY->false_value()));
     frame_->CallRuntime(Runtime::kNewClosure, 3);
     frame_->EmitPush(r0);
   }
@@ -3620,7 +3631,8 @@
         // else fall through
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+              Builtins::kStoreIC_Initialize));
           Load(value);
           if (property->emit_store()) {
             frame_->PopToR0();
@@ -3683,11 +3695,12 @@
   frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
   frame_->EmitPush(Operand(node->constant_elements()));
   int length = node->values()->length();
-  if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     frame_->CallStub(&stub, 3);
-    __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+    __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
+                        1, r1, r2);
   } else if (node->depth() > 1) {
     frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -4243,7 +4256,8 @@
     // Setup the name register and call the IC initialization code.
     __ mov(r2, Operand(var->name()));
     InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
+    Handle<Code> stub =
+        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
     CodeForSourcePosition(node->position());
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
                            arg_count + 1);
@@ -4338,7 +4352,7 @@
         __ mov(r2, Operand(name));
         InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
         Handle<Code> stub =
-            StubCache::ComputeCallInitialize(arg_count, in_loop);
+            ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
         CodeForSourcePosition(node->position());
         frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
         __ ldr(cp, frame_->Context());
@@ -4380,7 +4394,8 @@
         // Load the key into r2 and call the IC initialization code.
         InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
         Handle<Code> stub =
-            StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
+            ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
+                                                              in_loop);
         CodeForSourcePosition(node->position());
         frame_->SpillAll();
         __ ldr(r2, frame_->ElementAt(arg_count + 1));
@@ -4445,7 +4460,8 @@
   // Call the construct call builtin that handles allocation and
   // constructor invocation.
   CodeForSourcePosition(node->position());
-  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+      Builtins::kJSConstructCall));
   frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
   frame_->EmitPush(r0);
 
@@ -4494,13 +4510,13 @@
 
   // Functions have class 'Function'.
   function.Bind();
-  __ mov(tos, Operand(Factory::function_class_symbol()));
+  __ mov(tos, Operand(FACTORY->function_class_symbol()));
   frame_->EmitPush(tos);
   leave.Jump();
 
   // Objects with a non-function constructor have class 'Object'.
   non_function_constructor.Bind();
-  __ mov(tos, Operand(Factory::Object_symbol()));
+  __ mov(tos, Operand(FACTORY->Object_symbol()));
   frame_->EmitPush(tos);
   leave.Jump();
 
@@ -4601,7 +4617,7 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  if (!CpuFeatures::IsSupported(VFP3)) {
+  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     frame_->CallRuntime(Runtime::kMath_pow, 2);
     frame_->EmitPush(r0);
   } else {
@@ -4755,7 +4771,7 @@
   ASSERT(args->length() == 1);
   Load(args->at(0));
 
-  if (!CpuFeatures::IsSupported(VFP3)) {
+  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     frame_->CallRuntime(Runtime::kMath_sqrt, 1);
     frame_->EmitPush(r0);
   } else {
@@ -5141,7 +5157,7 @@
     Label entry, loop;
     // The use of ip to store the valueOf symbol assumes that it is not otherwise
     // used in the loop below.
-    __ mov(ip, Operand(Factory::value_of_symbol()));
+    __ mov(ip, Operand(FACTORY->value_of_symbol()));
     __ jmp(&entry);
     __ bind(&loop);
     __ ldr(scratch2_, MemOperand(map_result_, 0));
@@ -5344,9 +5360,9 @@
   // Convert 32 random bits in r0 to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     __ PrepareCallCFunction(0, r1);
-    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
 
     CpuFeatures::Scope scope(VFP3);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -5367,7 +5383,7 @@
     __ mov(r0, Operand(r4));
     __ PrepareCallCFunction(1, r1);
     __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(), 1);
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
     frame_->EmitPush(r0);
   }
 }
@@ -5468,7 +5484,7 @@
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
-      Top::global_context()->jsfunction_result_caches());
+      Isolate::Current()->global_context()->jsfunction_result_caches());
   if (jsfunction_result_caches->length() <= cache_id) {
     __ Abort("Attempt to use undefined cache.");
     frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
@@ -5578,8 +5594,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
-  deferred->Branch(lt);
+  __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
+  deferred->Branch(ne);
   __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
   __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
   deferred->Branch(ne);
@@ -5658,7 +5674,7 @@
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
@@ -5673,7 +5689,7 @@
 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::COS,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
@@ -5688,7 +5704,7 @@
 void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
@@ -5793,7 +5809,7 @@
 
   ZoneList<Expression*>* args = node->arguments();
   Comment cmnt(masm_, "[ CallRuntime");
-  Runtime::Function* function = node->function();
+  const Runtime::Function* function = node->function();
 
   if (function == NULL) {
     // Prepare stack for calling JS runtime function.
@@ -5817,7 +5833,8 @@
     // Call the JS runtime function.
     __ mov(r2, Operand(node->name()));
     InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
+    Handle<Code> stub =
+        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
     __ ldr(cp, frame_->Context());
     frame_->EmitPush(r0);
@@ -6352,7 +6369,7 @@
 
     Register scratch = VirtualFrame::scratch0();
 
-    if (check->Equals(Heap::number_symbol())) {
+    if (check->Equals(HEAP->number_symbol())) {
       __ tst(tos, Operand(kSmiTagMask));
       true_target()->Branch(eq);
       __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
@@ -6360,7 +6377,7 @@
       __ cmp(tos, ip);
       cc_reg_ = eq;
 
-    } else if (check->Equals(Heap::string_symbol())) {
+    } else if (check->Equals(HEAP->string_symbol())) {
       __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
 
@@ -6376,7 +6393,7 @@
       __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
       cc_reg_ = lt;
 
-    } else if (check->Equals(Heap::boolean_symbol())) {
+    } else if (check->Equals(HEAP->boolean_symbol())) {
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(tos, ip);
       true_target()->Branch(eq);
@@ -6384,7 +6401,7 @@
       __ cmp(tos, ip);
       cc_reg_ = eq;
 
-    } else if (check->Equals(Heap::undefined_symbol())) {
+    } else if (check->Equals(HEAP->undefined_symbol())) {
       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
       __ cmp(tos, ip);
       true_target()->Branch(eq);
@@ -6400,7 +6417,7 @@
 
       cc_reg_ = eq;
 
-    } else if (check->Equals(Heap::function_symbol())) {
+    } else if (check->Equals(HEAP->function_symbol())) {
       __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
       Register map_reg = scratch;
@@ -6410,7 +6427,7 @@
       __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
       cc_reg_ = eq;
 
-    } else if (check->Equals(Heap::object_symbol())) {
+    } else if (check->Equals(HEAP->object_symbol())) {
       __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
 
@@ -6572,8 +6589,10 @@
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
-  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
+  __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
+                      1, scratch1, scratch2);
+  __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
+                      1, scratch1, scratch2);
 
   // Ensure receiver in r0 and name in r2 to match load ic calling convention.
   __ Move(r0, receiver_);
@@ -6581,7 +6600,8 @@
 
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+        Builtins::kLoadIC_Initialize));
     RelocInfo::Mode mode = is_contextual_
         ? RelocInfo::CODE_TARGET_CONTEXT
         : RelocInfo::CODE_TARGET;
@@ -6643,8 +6663,10 @@
 
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
-  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
+  __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
+                      1, scratch1, scratch2);
+  __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
+                      1, scratch1, scratch2);
 
   // Ensure key in r0 and receiver in r1 to match keyed load ic calling
   // convention.
@@ -6655,7 +6677,8 @@
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed load has been inlined.
@@ -6702,9 +6725,10 @@
 void DeferredReferenceSetKeyedValue::Generate() {
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
-  __ IncrementCounter(
-      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
+  __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
+                      1, scratch1, scratch2);
+  __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
+                      1, scratch1, scratch2);
 
   // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
   // calling convention.
@@ -6717,9 +6741,10 @@
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed store IC. It has the arguments value, key and receiver in r0,
     // r1 and r2.
-    Handle<Code> ic(Builtins::builtin(
-        (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
-                                      : Builtins::KeyedStoreIC_Initialize));
+    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+        (strict_mode_ == kStrictMode)
+        ? Builtins::kKeyedStoreIC_Initialize_Strict
+        : Builtins::kKeyedStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed store has been inlined.
@@ -6772,9 +6797,9 @@
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call the named store IC. It has the arguments value, receiver and name
     // in r0, r1 and r2.
-    Handle<Code> ic(Builtins::builtin(
-        (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
-                                      : Builtins::StoreIC_Initialize));
+    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+        (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
+                                      : Builtins::kStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // named store has been inlined.
@@ -6798,7 +6823,7 @@
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
   bool contextual_load_in_builtin =
       is_contextual &&
-      (Bootstrapper::IsActive() ||
+      (ISOLATE->bootstrapper()->IsActive() ||
       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
 
   if (scope()->is_global_scope() ||
@@ -6820,11 +6845,12 @@
     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
     if (is_contextual) {
-      __ IncrementCounter(&Counters::named_load_global_inline, 1,
-                          frame_->scratch0(), frame_->scratch1());
+      __ IncrementCounter(
+          masm_->isolate()->counters()->named_load_global_inline(),
+          1, frame_->scratch0(), frame_->scratch1());
     } else {
-      __ IncrementCounter(&Counters::named_load_inline, 1,
-                          frame_->scratch0(), frame_->scratch1());
+      __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
+                          1, frame_->scratch0(), frame_->scratch1());
     }
 
     // The following instructions are the inlined load of an in-object property.
@@ -6856,8 +6882,9 @@
         }
       }
       if (is_dont_delete) {
-        __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
-                            frame_->scratch0(), frame_->scratch1());
+        __ IncrementCounter(
+            masm_->isolate()->counters()->dont_delete_hint_hit(),
+            1, frame_->scratch0(), frame_->scratch1());
       }
     }
 
@@ -6893,7 +6920,7 @@
       // Check the map. The null map used below is patched by the inline cache
       // code.  Therefore we can't use a LoadRoot call.
       __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ mov(scratch2, Operand(Factory::null_value()));
+      __ mov(scratch2, Operand(FACTORY->null_value()));
       __ cmp(scratch, scratch2);
       deferred->Branch(ne);
 
@@ -6902,7 +6929,7 @@
         InlinedNamedLoadInstructions += 1;
 #endif
         // Load the (initially invalid) cell and get its value.
-        masm()->mov(receiver, Operand(Factory::null_value()));
+        masm()->mov(receiver, Operand(FACTORY->null_value()));
         __ ldr(receiver,
                FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
 
@@ -6912,13 +6939,13 @@
 #ifdef DEBUG
           InlinedNamedLoadInstructions += 3;
 #endif
-          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
           deferred->Branch(eq);
         } else if (FLAG_debug_code) {
 #ifdef DEBUG
           InlinedNamedLoadInstructions += 3;
 #endif
-          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
           __ b(&check_the_hole, eq);
           __ bind(&cont);
         }
@@ -6986,7 +7013,7 @@
       Label check_inlined_codesize;
       masm_->bind(&check_inlined_codesize);
 #endif
-      __ mov(scratch0, Operand(Factory::null_value()));
+      __ mov(scratch0, Operand(FACTORY->null_value()));
       __ cmp(scratch0, scratch1);
       deferred->Branch(ne);
 
@@ -7016,11 +7043,11 @@
       // Check that this is the first inlined write barrier or that
       // this inlined write barrier has the same size as all the other
       // inlined write barriers.
-      ASSERT((inlined_write_barrier_size_ == -1) ||
-             (inlined_write_barrier_size_ ==
+      ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
+             (Isolate::Current()->inlined_write_barrier_size() ==
               masm()->InstructionsGeneratedSince(&record_write_start)));
-      inlined_write_barrier_size_ =
-          masm()->InstructionsGeneratedSince(&record_write_start);
+      Isolate::Current()->set_inlined_write_barrier_size(
+          masm()->InstructionsGeneratedSince(&record_write_start));
 
       // Make sure that the expected number of instructions are generated.
       ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
@@ -7042,8 +7069,8 @@
 
     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(&Counters::keyed_load_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+    __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
+                        1, frame_->scratch0(), frame_->scratch1());
 
     // Load the key and receiver from the stack.
     bool key_is_known_smi = frame_->KnownSmiAt(0);
@@ -7079,7 +7106,7 @@
       Label check_inlined_codesize;
       masm_->bind(&check_inlined_codesize);
 #endif
-      __ mov(scratch2, Operand(Factory::null_value()));
+      __ mov(scratch2, Operand(FACTORY->null_value()));
       __ cmp(scratch1, scratch2);
       deferred->Branch(ne);
 
@@ -7129,9 +7156,8 @@
 
     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(&Counters::keyed_store_inline, 1,
-                        scratch1, scratch2);
-
+    __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
+                        1, scratch1, scratch2);
 
 
     // Load the value, key and receiver from the stack.
@@ -7181,18 +7207,14 @@
     __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
     deferred->Branch(ne);
 
-    // Check that the key is within bounds. Both the key and the length of
-    // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
-    __ cmp(scratch1, key);
-    deferred->Branch(ls);  // Unsigned less equal.
-
     // Get the elements array from the receiver.
     __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
     if (!value_is_harmless && wb_info != LIKELY_SMI) {
       Label ok;
-      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
-      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
+      __ and_(scratch2,
+              scratch1,
+              Operand(ExternalReference::new_space_mask(isolate())));
+      __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
       __ tst(value, Operand(kSmiTagMask), ne);
       deferred->Branch(ne);
 #ifdef DEBUG
@@ -7201,6 +7223,7 @@
     }
     // Check that the elements array is not a dictionary.
     __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+
     // The following instructions are part of the inlined store keyed
     // property code which can be patched. Therefore the exact number of
     // instructions generated needs to be fixed, so the constant pool is blocked
@@ -7216,10 +7239,18 @@
       // comparison to always fail so that we will hit the IC call in the
       // deferred code which will allow the debugger to break for fast case
       // stores.
-      __ mov(scratch3, Operand(Factory::fixed_array_map()));
+      __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
       __ cmp(scratch2, scratch3);
       deferred->Branch(ne);
 
+      // Check that the key is within bounds.  Both the key and the length of
+      // the JSArray are smis (because the fixed array check above ensures the
+      // elements are in fast case). Use unsigned comparison to handle negative
+      // keys.
+      __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ cmp(scratch3, key);
+      deferred->Branch(ls);  // Unsigned less equal.
+
       // Store the value.
       __ add(scratch1, scratch1,
              Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -7378,7 +7409,7 @@
 const char* GenericBinaryOpStub::GetName() {
   if (name_ != NULL) return name_;
   const int len = 100;
-  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
   if (name_ == NULL) return "OOM";
   const char* op_name = Token::Name(op_);
   const char* overwrite_name;
@@ -7398,7 +7429,6 @@
   return name_;
 }
 
-
 #undef __
 
 } }  // namespace v8::internal