Version 1.3.6

Add support for forceful termination of JavaScript execution.
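
A minimal sketch of the embedder side, assuming the entry point is exposed as
V8::TerminateExecution() in include/v8.h; the exact name and signature are an
assumption of this sketch, not confirmed by this log.

    // Sketch only: a watchdog thread forcefully terminating a runaway script.
    // Assumes v8::V8::TerminateExecution(); verify against include/v8.h.
    #include <v8.h>

    void OnWatchdogTimeout() {
      // Unwinds any JavaScript currently executing. The script cannot catch
      // the termination, and control returns to the embedder.
      v8::V8::TerminateExecution();
    }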

Add low memory notification to the API. The embedding host can signal a low memory situation to V8.
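
A hedged sketch of the host side, assuming the hook is exposed as
V8::LowMemoryNotification(); the name is an assumption of this sketch.

    // Sketch only: signalling memory pressure from the embedding host.
    #include <v8.h>

    void OnSystemLowMemory() {
      // Hints V8 to attempt a full garbage collection and release memory.
      v8::V8::LowMemoryNotification();  // Assumed API name.
    }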

Changed the handling of global handles (persistent handles in the API sense) to avoid issues when new global handles are allocated during weak handle callbacks.
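
The scenario this change hardens, sketched with the weak-handle callback API
of this era; Persistent<T>::New, Object::New and Dispose are assumed to match
include/v8.h of this release.

    // Sketch only: allocating a new global (persistent) handle from inside
    // a weak handle callback, which this change is intended to make safe.
    #include <v8.h>

    void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
      v8::HandleScope scope;
      // Creating a fresh persistent handle during the callback used to be
      // fragile; the reworked global handle bookkeeping permits it.
      v8::Persistent<v8::Object> replacement =
          v8::Persistent<v8::Object>::New(v8::Object::New());
      object.Dispose();
    }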

Changed the growth policy of the young space.

Fixed a GC issue introduced in version 1.3.5.



git-svn-id: http://v8.googlecode.com/svn/trunk@3024 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index cd5a1bb..5417ed7 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -81,7 +81,13 @@
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 7e43f2e..d1df08c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -645,8 +645,8 @@
     str(src, MemOperand(sp, 4, NegPreIndex), cond);
   }
 
-  void pop(Register dst) {
-    ldr(dst, MemOperand(sp, 4, PostIndex), al);
+  void pop(Register dst, Condition cond = al) {
+    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
   }
 
   void pop() {
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index cdea1cb..d7afb37 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -44,15 +44,379 @@
   __ str(r1, MemOperand(ip, 0));
 
   // The actual argument count has already been loaded into register
-  // r0, but JumpToBuiltin expects r0 to contain the number of
+  // r0, but JumpToRuntime expects r0 to contain the number of
   // arguments including the receiver.
   __ add(r0, r0, Operand(1));
-  __ JumpToBuiltin(ExternalReference(id));
+  __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ ldr(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements. If
+// JSArray::kPreallocatedArrayElements is changed, the handling of loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole value.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ ldr(scratch1, FieldMemOperand(array_function,
+                                   JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3, Operand(0));
+  __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ add(scratch1, result, Operand(JSArray::kSize));
+  __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  __ mov(scratch3, Operand(initial_capacity));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and the beginning and end of the FixedArray
+// elements storage is put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole value; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ ldr(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ tst(array_size, array_size);
+  __ b(nz, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ mov(elements_array_end,
+         Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+  __ add(elements_array_end,
+         elements_array_end,
+         Operand(array_size, ASR, kSmiTagSize));
+  __ AllocateInNewSpace(elements_array_end,
+                        result,
+                        scratch1,
+                        scratch2,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ add(elements_array_storage, result, Operand(JSArray::kSize));
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+  // Convert array_size from smi to value.
+  __ mov(array_size,
+         Operand(array_size, ASR, kSmiTagSize));
+  __ tst(array_size, array_size);
+  // The length of the FixedArray is the number of pre-allocated elements when
+  // the actual JSArray has length 0, and the requested array size for
+  // non-empty JSArrays. The length of a FixedArray is not stored as a smi.
+  __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(array_size,
+         MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: size of elements array
+  __ add(elements_array_end,
+         elements_array_storage,
+         Operand(array_size, LSL, kPointerSizeLog2));
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ str(scratch1,
+           MemOperand(elements_array_storage, kPointerSize, PostIndex));
+    __ bind(&entry);
+    __ cmp(elements_array_storage, elements_array_end);
+    __ b(lt, &loop);
+  }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called.
+// This function assumes the following state:
+//   r0: argc
+//   r1: constructor (built-in Array function)
+//   lr: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+// Both registers are preserved by this code, so there is no need to
+// differentiate between construct and normal calls.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments or one.
+  __ cmp(r0, Operand(0));
+  __ b(ne, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       r1,
+                       r2,
+                       r3,
+                       r4,
+                       r5,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+  // Setup return value, remove receiver from stack and return.
+  __ mov(r0, r2);
+  __ add(sp, sp, Operand(kPointerSize));
+  __ Jump(lr);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmp(r0, Operand(1));
+  __ b(ne, &argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
+  __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+  __ b(ne, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  ASSERT(kSmiTag == 0);
+  __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+  __ b(ge, call_generic_code);
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+  // Setup return value, remove receiver and argument from stack and return.
+  __ mov(r0, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Jump(lr);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ mov(r2, Operand(r0, LSL, kSmiTagSize));  // Convert argc to a smi.
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store, filling it backwards. Note:
+  // elements_array_end points after the backing store, therefore PreIndex is
+  // used when filling the backing store.
+  // r0: argc
+  // r3: JSArray
+  // r4: elements_array storage start (untagged)
+  // r5: elements_array_end (untagged)
+  // sp[0]: last argument
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ cmp(r4, r5);
+  __ b(lt, &loop);
+
+  // Remove caller arguments and receiver from the stack, setup return value and
+  // return.
+  // r0: argc
+  // r3: JSArray
+  // sp[0]: receiver
+  __ add(sp, sp, Operand(kPointerSize));
+  __ mov(r0, r3);
+  __ Jump(lr);
 }
 
 
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  // Just jump to the generic array code.
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
   Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
   Handle<Code> array_code(code);
   __ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -60,7 +424,34 @@
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  // Just jump to the generic construct code.
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- r1     : constructor function
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, r2);
+    __ cmp(r1, r2);
+    __ Assert(eq, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
   Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
   Handle<Code> generic_construct_stub(code);
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -149,7 +540,7 @@
     // r2: initial map
     // r7: undefined
     __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
 
     // Allocated the JSObject, now initialize the fields. Map is set to initial
     // map and properties and elements are set to empty fixed array.
@@ -220,12 +611,12 @@
     // r5: start of next object
     // r7: undefined
     __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateObjectInNewSpace(r0,
-                                r5,
-                                r6,
-                                r2,
-                                &undo_allocation,
-                                RESULT_CONTAINS_TOP);
+    __ AllocateInNewSpace(r0,
+                          r5,
+                          r6,
+                          r2,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
     // Initialize the FixedArray.
     // r1: constructor
diff --git a/src/arm/cfg-arm.cc b/src/arm/cfg-arm.cc
deleted file mode 100644
index e0e563c..0000000
--- a/src/arm/cfg-arm.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h"  // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmt(masm, "[ InstructionBlock");
-    for (int i = 0, len = instructions_.length(); i < len; i++) {
-      // If the location of the current instruction is a temp, then the
-      // instruction cannot be in tail position in the block.  Allocate the
-      // temp based on peeking ahead to the next instruction.
-      Instruction* instr = instructions_[i];
-      Location* loc = instr->location();
-      if (loc->is_temporary()) {
-        instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
-      }
-      instructions_[i]->Compile(masm);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmnt(masm, "[ EntryNode");
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-    __ add(fp, sp, Operand(2 * kPointerSize));
-    int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
-    if (count > 0) {
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      for (int i = 0; i < count; i++) {
-        __ push(ip);
-      }
-    }
-    if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
-    }
-    if (FLAG_check_stack) {
-      StackCheckStub stub;
-      __ CallStub(&stub);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Comment cmnt(masm, "[ ExitNode");
-  if (FLAG_trace) {
-    __ push(r0);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-  __ add(sp, sp, Operand((count + 1) * kPointerSize));
-  __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
-  // The key should not be on the stack---if it is a compiler-generated
-  // temporary it is in the accumulator.
-  ASSERT(!key()->is_on_stack());
-
-  Comment cmnt(masm, "[ Load from Property");
-  // If the key is known at compile-time we may be able to use a load IC.
-  bool is_keyed_load = true;
-  if (key()->is_constant()) {
-    // Still use the keyed load IC if the key can be parsed as an integer so
-    // we will get into the case that handles [] on string objects.
-    Handle<Object> key_val = Constant::cast(key())->handle();
-    uint32_t ignored;
-    if (key_val->IsSymbol() &&
-        !String::cast(*key_val)->AsArrayIndex(&ignored)) {
-      is_keyed_load = false;
-    }
-  }
-
-  if (!object()->is_on_stack()) object()->Push(masm);
-
-  if (is_keyed_load) {
-    key()->Push(masm);
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // Discard key and receiver.
-    __ add(sp, sp, Operand(2 * kPointerSize));
-  } else {
-    key()->Get(masm, r2);
-    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ pop();  // Discard receiver.
-  }
-  location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
-  // The right-hand value should not be on the stack---if it is a
-  // compiler-generated temporary it is in the accumulator.
-  ASSERT(!right()->is_on_stack());
-
-  Comment cmnt(masm, "[ BinaryOpInstr");
-  // We can overwrite one of the operands if it is a temporary.
-  OverwriteMode mode = NO_OVERWRITE;
-  if (left()->is_temporary()) {
-    mode = OVERWRITE_LEFT;
-  } else if (right()->is_temporary()) {
-    mode = OVERWRITE_RIGHT;
-  }
-
-  // Move left to r1 and right to r0.
-  left()->Get(masm, r1);
-  right()->Get(masm, r0);
-  GenericBinaryOpStub stub(op(), mode);
-  __ CallStub(&stub);
-  location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
-  // The location should be 'Effect'.  As a side effect, move the value to
-  // the accumulator.
-  Comment cmnt(masm, "[ ReturnInstr");
-  value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
-  __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
-  __ mov(ip, Operand(handle_));
-  __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
-  switch (loc->type()) {
-    case Slot::PARAMETER: {
-      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
-    }
-    case Slot::LOCAL: {
-      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      return MemOperand(fp, kOffset - loc->index() * kPointerSize);
-    }
-    default:
-      UNREACHABLE();
-      return MemOperand(r0);
-  }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ mov(ip, Operand(handle_));
-  __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
-  __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
-  __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
-  __ ldr(ip, ToMemOperand(this));
-  __ push(ip);  // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
-  // Double dispatch.
-  value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ ldr(ip, ToMemOperand(this));
-  __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(r0)) __ mov(reg, r0);
-      break;
-    case STACK:
-      __ pop(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(r0)) __ mov(r0, reg);
-      break;
-    case STACK:
-      __ push(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ push(r0);
-      break;
-    case STACK:
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
-  switch (where_) {
-    case ACCUMULATOR:
-      value->Get(masm, r0);
-    case STACK:
-      value->Push(masm);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ str(r0, ToMemOperand(loc));
-    case STACK:
-      __ pop(ip);
-      __ str(ip, ToMemOperand(loc));
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 477ea05..cdd32f3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1188,7 +1188,6 @@
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -2811,7 +2810,6 @@
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatementPosition(node);
 
   { Reference target(this, node->target());
     if (target.is_illegal()) {
@@ -2909,13 +2907,11 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Call");
 
+  Expression* function = node->expression();
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatementPosition(node);
   // Standard function call.
-
   // Check if the function is a variable or a property.
-  Expression* function = node->expression();
   Variable* var = function->AsVariableProxy()->AsVariable();
   Property* property = function->AsProperty();
 
@@ -2928,7 +2924,56 @@
   // is resolved in cache misses (this also holds for megamorphic calls).
   // ------------------------------------------------------------------------
 
-  if (var != NULL && !var->is_this() && var->is_global()) {
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    // Prepare stack for call to resolved function.
+    LoadAndSpill(function);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    frame_->EmitPush(r2);  // Slot for receiver
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Prepare stack for call to ResolvePossiblyDirectEval.
+    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+    frame_->EmitPush(r1);
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      frame_->EmitPush(r1);
+    } else {
+      frame_->EmitPush(r2);
+    }
+
+    // Resolve the call.
+    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up stack with the right values for the function and the receiver.
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+    __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    frame_->CallStub(&call_function, arg_count + 1);
+
+    __ ldr(cp, frame_->Context());
+    // Remove the function from the stack.
+    frame_->Drop();
+    frame_->EmitPush(r0);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
@@ -3053,72 +3098,12 @@
 }
 
 
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ CallEval");
-
-  // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
-  // the function we need to call and the receiver of the call.
-  // Then we call the resolved function using the given arguments.
-
-  ZoneList<Expression*>* args = node->arguments();
-  Expression* function = node->expression();
-
-  CodeForStatementPosition(node);
-
-  // Prepare stack for call to resolved function.
-  LoadAndSpill(function);
-  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(r2);  // Slot for receiver
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    LoadAndSpill(args->at(i));
-  }
-
-  // Prepare stack for call to ResolvePossiblyDirectEval.
-  __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
-  frame_->EmitPush(r1);
-  if (arg_count > 0) {
-    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-    frame_->EmitPush(r1);
-  } else {
-    frame_->EmitPush(r2);
-  }
-
-  // Resolve the call.
-  frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
-  // Touch up stack with the right values for the function and the receiver.
-  __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
-  __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
-  __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
-  // Call the function.
-  CodeForSourcePosition(node->position());
-
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop);
-  frame_->CallStub(&call_function, arg_count + 1);
-
-  __ ldr(cp, frame_->Context());
-  // Remove the function from the stack.
-  frame_->Drop();
-  frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
 void CodeGenerator::VisitCallNew(CallNew* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -4960,12 +4945,12 @@
     Register scratch2) {  // Another scratch register.
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
-  __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
-                              result,
-                              scratch1,
-                              scratch2,
-                              need_gc,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
 
   // Get heap number map and store it in the allocated object.
   __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
@@ -5076,11 +5061,14 @@
   // r5: Address of heap number for result.
   __ push(lr);   // For later.
   __ push(r5);   // Address of heap number that is answer.
+  __ AlignStack(0);
   // Call C routine that may not cause GC or other trouble.
   __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
   __ Call(r5);
+  __ pop(r4);  // Address of heap number.
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
   // Store answer in the overwritable heap number.
-  __ pop(r4);
 #if !defined(USE_ARM_EABI)
   // Double returned in fp coprocessor register 0 and 1, encoded as register
   // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index b28e965..1eb0932 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -370,7 +370,7 @@
   // information.
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(AstNode* node);
+  void CodeForStatementPosition(Statement* node);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6dd9b8f..45c6540 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -291,27 +291,8 @@
 
   // Align the stack at this point.  After this point we have 5 pushes,
   // so in fact we have to unalign here!  See also the assert on the
-  // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
-  // Running on the real platform. Use the alignment as mandated by the local
-  // environment.
-  // Note: This will break if we ever start generating snapshots on one ARM
-  // platform for another ARM platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else  // defined(V8_HOST_ARCH_ARM)
-  // If we are using the simulator then we should always align to the expected
-  // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
-#endif  // defined(V8_HOST_ARCH_ARM)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    mov(r7, Operand(Smi::FromInt(0)));
-    tst(sp, Operand(activation_frame_alignment - 1));
-    push(r7, eq);  // Conditional push instruction.
-  }
+  // alignment in AlignStack.
+  AlignStack(1);
 
   // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -343,6 +324,30 @@
 }
 
 
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one ARM
+  // platform for another ARM platform with a different alignment.
+  int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_ARM)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so we will always align at
+  // this point here.
+  int activation_frame_alignment = 2 * kPointerSize;
+#endif  // defined(V8_HOST_ARCH_ARM)
+  if (activation_frame_alignment != kPointerSize) {
+    // This code needs to be made more general if this assert doesn't hold.
+    ASSERT(activation_frame_alignment == 2 * kPointerSize);
+    mov(r7, Operand(Smi::FromInt(0)));
+    tst(sp, Operand(activation_frame_alignment - offset));
+    push(r7, eq);  // Conditional push instruction.
+  }
+}
+
+
 void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
@@ -763,12 +768,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
-                                              Register result,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(scratch1));
   ASSERT(!scratch1.is(scratch2));
 
@@ -813,12 +818,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
-                                              Register result,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(scratch1));
   ASSERT(!scratch1.is(scratch2));
 
@@ -1001,11 +1006,11 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   mov(r0, Operand(num_arguments));
-  JumpToBuiltin(ext);
+  JumpToRuntime(ext);
 }
 
 
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
 #if defined(__thumb__)
   // Thumb mode builtin.
   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1046,7 +1051,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
@@ -1064,7 +1068,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 03aa4d0..ee9d70d 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -96,6 +96,8 @@
   // Leave the current exit frame. Expects the return value in r0.
   void LeaveExitFrame(StackFrame::Type type);
 
+  // Align the stack by optionally pushing a Smi zero.
+  void AlignStack(int offset);
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
@@ -171,18 +173,18 @@
   // bytes). If the new space is exhausted control continues at the gc_required
   // label. The allocated object is returned in result. If the flag
 // tag_allocated_object is true, the result is tagged as a heap object.
-  void AllocateObjectInNewSpace(int object_size,
-                                Register result,
-                                Register scratch1,
-                                Register scratch2,
-                                Label* gc_required,
-                                AllocationFlags flags);
-  void AllocateObjectInNewSpace(Register object_size,
-                                Register result,
-                                Register scratch1,
-                                Register scratch2,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. The caller must make sure that no pointers
@@ -257,14 +259,14 @@
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToBuiltin, but also takes care of passing the number
+  // Like JumpToRuntime, but also takes care of passing the number
   // of parameters.
   void TailCallRuntime(const ExternalReference& ext,
                        int num_arguments,
                        int result_size);
 
-  // Jump to the builtin routine.
-  void JumpToBuiltin(const ExternalReference& builtin);
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& builtin);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -329,8 +331,16 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in register r1.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
   // Activation support.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 70dfcd2..22bec82 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -409,7 +409,7 @@
 
 
 Simulator::Simulator() {
-  ASSERT(initialized_);
+  Initialize();
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -501,6 +501,7 @@
 
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current() {
+  Initialize();
   Simulator* sim = reinterpret_cast<Simulator*>(
       v8::internal::Thread::GetThreadLocal(simulator_key));
   if (sim == NULL) {
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3917d6a..ff6bbf4 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -36,18 +36,23 @@
 #ifndef V8_ARM_SIMULATOR_ARM_H_
 #define V8_ARM_SIMULATOR_ARM_H_
 
+#include "allocation.h"
+
 #if defined(__arm__)
 
 // When running without a simulator we call the entry directly.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   (entry(p0, p1, p2, p3, p4))
 
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
 
 
 // Call the generated regexp code directly. The entry function pointer should
@@ -64,12 +69,6 @@
       assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
                                                  p0, p1, p2, p3, p4))
 
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (assembler::arm::Simulator::current()->StackLimit())
-
-
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
   assembler::arm::Simulator::current()->Call( \
     FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
@@ -219,6 +218,20 @@
 
 } }  // namespace assembler::arm
 
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores
+// the input. This is unlikely to be an issue in practice, though it might
+// cause testing trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return assembler::arm::Simulator::current()->StackLimit();
+  }
+};
+
+
 #endif  // defined(__arm__)
 
 #endif  // V8_ARM_SIMULATOR_ARM_H_
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9e44cfa..8282655 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1390,12 +1390,12 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateObjectInNewSpace(r3,
-                              r4,
-                              r5,
-                              r6,
-                              &generic_stub_call,
-                              NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(r3,
+                        r4,
+                        r5,
+                        r6,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.