Version 1.3.6

Added support for forceful termination of JavaScript execution.
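
A minimal embedder-side sketch (hedged: V8::TerminateExecution is assumed
to be the entry point this change introduces in v8.h):

  // Called from a watchdog thread when a script runs too long.  V8 unwinds
  // the JavaScript stack by throwing an uncatchable termination exception.
  v8::V8::TerminateExecution();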

Added a low memory notification to the API. The embedding host can now signal a low-memory situation to V8.
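
For example (a hedged sketch; V8::LowMemoryNotification is assumed to be
the new entry point):

  // Invoked by the embedder, e.g. on a platform memory-pressure signal.
  // V8 responds by collecting as much garbage as it can.
  v8::V8::LowMemoryNotification();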

Changed the handling of global handles (persistent handles in the API sense) so that new global handles can safely be allocated during weak handle callbacks.
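
A sketch of the callback pattern this makes safe, using the Persistent
handle API of this era (signatures assumed from v8.h; 'obj' is an existing
Handle<Object>):

  void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
    // Allocating new global handles here, e.g. via
    // v8::Persistent<v8::Object>::New(...), no longer causes problems.
    object.Dispose();
  }

  v8::Persistent<v8::Object> handle = v8::Persistent<v8::Object>::New(obj);
  handle.MakeWeak(NULL, OnWeak);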

Changed the growth policy of the young space.

Fixed a GC issue introduced in version 1.3.5.



git-svn-id: http://v8.googlecode.com/svn/trunk@3024 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index 4230647..b6c2b4d 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -36,49 +36,48 @@
 SOURCES = {
   'all': [
     'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
-    'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc',
-    'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc',
-    'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc',
-    'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
-    'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
-    'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
-    'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc',
-    'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
-    'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
-    'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
-    'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
-    'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc',
-    'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc',
-    'serialize.cc', 'snapshot-common.cc', 'spaces.cc',
-    'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+    'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
+    'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
+    'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
+    'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+    'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
+    'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
+    'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+    'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
+    'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
+    'property.cc', 'regexp-macro-assembler.cc',
+    'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+    'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+    'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
+    'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
     'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
     'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
     'virtual-frame.cc', 'zone.cc'
   ],
   'arch:arm': [
-    'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc',
-    'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc',
-    'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc',
-    'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
-    'arm/regexp-macro-assembler-arm.cc',
-    'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
-    'arm/virtual-frame-arm.cc'
+    'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
+    'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
+    'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
+    'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
+    'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
+    'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
   ],
   'arch:ia32': [
-    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
     'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
     'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
     'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
-    'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
-    'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+    'ia32/regexp-macro-assembler-ia32.cc',
+    'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
+    'ia32/virtual-frame-ia32.cc'
   ],
   'arch:x64': [
-    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
-    'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
-    'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
-    'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
-    'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
-    'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
+    'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
+    'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
+    'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
+    'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
+    'x64/virtual-frame-x64.cc'
   ],
   'simulator:arm': ['arm/simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
diff --git a/src/api.cc b/src/api.cc
index 649647b..00f1e0b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
+#include "arguments.h"
 #include "bootstrapper.h"
 #include "compiler.h"
 #include "debug.h"
@@ -71,7 +72,7 @@
     thread_local.DecrementCallDepth();                                         \
     if (has_pending_exception) {                                               \
       if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) {      \
-        if (!thread_local.IgnoreOutOfMemory())                                 \
+        if (!thread_local.ignore_out_of_memory())                              \
           i::V8::FatalProcessOutOfMemory(NULL);                                \
       }                                                                        \
       bool call_depth_is_zero = thread_local.CallDepthIsZero();                \
@@ -341,9 +342,12 @@
 
 
 bool SetResourceConstraints(ResourceConstraints* constraints) {
-  bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(),
-                                       constraints->max_old_space_size());
-  if (!result) return false;
+  int semispace_size = constraints->max_young_space_size();
+  int old_gen_size = constraints->max_old_space_size();
+  if (semispace_size != 0 || old_gen_size != 0) {
+    bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+    if (!result) return false;
+  }
   if (constraints->stack_limit() != NULL) {
     uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
     i::StackGuard::SetStackLimit(limit);
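
With this change a default-constructed ResourceConstraints (all sizes zero)
no longer reconfigures the heap, so an embedder can set just the stack
limit. A hedged sketch (accessor names assumed from the v8.h of this era;
'low_watermark' is a hypothetical address near the thread's stack end):

  v8::ResourceConstraints constraints;
  constraints.set_stack_limit(reinterpret_cast<uint32_t*>(low_watermark));
  // The heap sizes stay 0, so i::Heap::ConfigureHeap is skipped and the
  // default heap configuration is preserved.
  v8::SetResourceConstraints(&constraints);
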
@@ -2446,20 +2450,14 @@
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  // Flatten the string for efficiency.  This applies whether we are
-  // using StringInputBuffer or Get(i) to access the characters.
-  str->TryFlattenIfNotFlat();
   int end = length;
   if ( (length == -1) || (length > str->length() - start) )
     end = str->length() - start;
   if (end < 0) return 0;
-  write_input_buffer.Reset(start, *str);
-  int i;
-  for (i = 0; i < end; i++)
-    buffer[i] = write_input_buffer.GetNext();
-  if (length == -1 || i < length)
-    buffer[i] = '\0';
-  return i;
+  i::String::WriteToFlat(*str, buffer, start, end);
+  if (length == -1 || end < length)
+    buffer[end] = '\0';
+  return end;
 }
 
 
@@ -2604,9 +2602,11 @@
 }
 
 
-bool v8::V8::IdleNotification(bool is_high_priority) {
-  if (!i::V8::IsRunning()) return false;
-  return i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification() {
+  // Returning true tells the caller that it need not
+  // continue to call IdleNotification.
+  if (!i::V8::IsRunning()) return true;
+  return i::V8::IdleNotification();
 }
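
Under the new contract a true return value means no further idle work is
pending, so an embedder can drive idle-time cleanup like this (hedged
sketch):

  // Keep notifying V8 as long as the embedder remains idle.
  while (!v8::V8::IdleNotification()) {
    // V8 did some cleanup; call again on the next idle tick.
  }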
 
 
@@ -2767,7 +2767,9 @@
 
 v8::Local<v8::Context> Context::GetCalling() {
   if (IsDeadCheck("v8::Context::GetCalling()")) return Local<Context>();
-  i::Handle<i::Context> context(i::Top::GetCallingGlobalContext());
+  i::Handle<i::Object> calling = i::Top::GetCallingGlobalContext();
+  if (calling.is_null()) return Local<Context>();
+  i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
   return Utils::ToLocal(context);
 }
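
Callers must now handle the case where no JavaScript frame is on the stack,
for example (hedged sketch):

  v8::Local<v8::Context> calling = v8::Context::GetCalling();
  if (calling.IsEmpty()) {
    // No calling context; fall back to e.g. Context::GetEntered().
  }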
 
@@ -3214,7 +3216,7 @@
 
 
 void V8::IgnoreOutOfMemoryException() {
-  thread_local.SetIgnoreOutOfMemory(true);
+  thread_local.set_ignore_out_of_memory(true);
 }
 
 
@@ -3696,6 +3698,11 @@
 }
 
 
+void HandleScopeImplementer::FreeThreadResources() {
+  thread_local.Free();
+}
+
+
 char* HandleScopeImplementer::ArchiveThread(char* storage) {
   return thread_local.ArchiveThreadHelper(storage);
 }
@@ -3707,7 +3714,7 @@
   handle_scope_data_ = *current;
   memcpy(storage, this, sizeof(*this));
 
-  Initialize();
+  ResetAfterArchive();
   current->Initialize();
 
   return storage + ArchiveSpacePerThread();
@@ -3733,14 +3740,14 @@
 
 void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
   // Iterate over all handles in the blocks except for the last.
-  for (int i = Blocks()->length() - 2; i >= 0; --i) {
-    Object** block = Blocks()->at(i);
+  for (int i = blocks()->length() - 2; i >= 0; --i) {
+    Object** block = blocks()->at(i);
     v->VisitPointers(block, &block[kHandleBlockSize]);
   }
 
   // Iterate over live handles in the last block (if any).
-  if (!Blocks()->is_empty()) {
-    v->VisitPointers(Blocks()->last(), handle_scope_data_.next);
+  if (!blocks()->is_empty()) {
+    v->VisitPointers(blocks()->last(), handle_scope_data_.next);
   }
 
   if (!saved_contexts_.is_empty()) {
diff --git a/src/api.h b/src/api.h
index 9ae6307..1221f35 100644
--- a/src/api.h
+++ b/src/api.h
@@ -311,20 +311,12 @@
  public:
 
   HandleScopeImplementer()
-      : blocks(0),
+      : blocks_(0),
         entered_contexts_(0),
-        saved_contexts_(0) {
-    Initialize();
-  }
-
-  void Initialize() {
-    blocks.Initialize(0);
-    entered_contexts_.Initialize(0);
-    saved_contexts_.Initialize(0);
-    spare = NULL;
-    ignore_out_of_memory = false;
-    call_depth = 0;
-  }
+        saved_contexts_(0),
+        spare_(NULL),
+        ignore_out_of_memory_(false),
+        call_depth_(0) { }
 
   static HandleScopeImplementer* instance();
 
@@ -332,6 +324,7 @@
   static int ArchiveSpacePerThread();
   static char* RestoreThread(char* from);
   static char* ArchiveThread(char* to);
+  static void FreeThreadResources();
 
   // Garbage collection support.
   static void Iterate(v8::internal::ObjectVisitor* v);
@@ -341,9 +334,9 @@
   inline internal::Object** GetSpareOrNewBlock();
   inline void DeleteExtensions(int extensions);
 
-  inline void IncrementCallDepth() {call_depth++;}
-  inline void DecrementCallDepth() {call_depth--;}
-  inline bool CallDepthIsZero() { return call_depth == 0; }
+  inline void IncrementCallDepth() {call_depth_++;}
+  inline void DecrementCallDepth() {call_depth_--;}
+  inline bool CallDepthIsZero() { return call_depth_ == 0; }
 
   inline void EnterContext(Handle<Object> context);
   inline bool LeaveLastContext();
@@ -356,20 +349,44 @@
   inline Context* RestoreContext();
   inline bool HasSavedContexts();
 
-  inline List<internal::Object**>* Blocks() { return &blocks; }
-
-  inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
-  inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
+  inline List<internal::Object**>* blocks() { return &blocks_; }
+  inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+  inline void set_ignore_out_of_memory(bool value) {
+    ignore_out_of_memory_ = value;
+  }
 
  private:
-  List<internal::Object**> blocks;
-  Object** spare;
-  int call_depth;
+  void ResetAfterArchive() {
+    blocks_.Initialize(0);
+    entered_contexts_.Initialize(0);
+    saved_contexts_.Initialize(0);
+    spare_ = NULL;
+    ignore_out_of_memory_ = false;
+    call_depth_ = 0;
+  }
+
+  void Free() {
+    ASSERT(blocks_.length() == 0);
+    ASSERT(entered_contexts_.length() == 0);
+    ASSERT(saved_contexts_.length() == 0);
+    blocks_.Free();
+    entered_contexts_.Free();
+    saved_contexts_.Free();
+    if (spare_ != NULL) {
+      DeleteArray(spare_);
+      spare_ = NULL;
+    }
+    ASSERT(call_depth_ == 0);
+  }
+
+  List<internal::Object**> blocks_;
   // Used as a stack to keep track of entered contexts.
   List<Handle<Object> > entered_contexts_;
   // Used as a stack to keep track of saved contexts.
   List<Context*> saved_contexts_;
-  bool ignore_out_of_memory;
+  Object** spare_;
+  bool ignore_out_of_memory_;
+  int call_depth_;
   // This is only used for threading support.
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
 
@@ -419,32 +436,32 @@
 
 // If there's a spare block, use it for growing the current scope.
 internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
-  internal::Object** block = (spare != NULL) ?
-      spare :
+  internal::Object** block = (spare_ != NULL) ?
+      spare_ :
       NewArray<internal::Object*>(kHandleBlockSize);
-  spare = NULL;
+  spare_ = NULL;
   return block;
 }
 
 
 void HandleScopeImplementer::DeleteExtensions(int extensions) {
-  if (spare != NULL) {
-    DeleteArray(spare);
-    spare = NULL;
+  if (spare_ != NULL) {
+    DeleteArray(spare_);
+    spare_ = NULL;
   }
   for (int i = extensions; i > 1; --i) {
-    internal::Object** block = blocks.RemoveLast();
+    internal::Object** block = blocks_.RemoveLast();
 #ifdef DEBUG
     v8::ImplementationUtilities::ZapHandleRange(block,
                                                 &block[kHandleBlockSize]);
 #endif
     DeleteArray(block);
   }
-  spare = blocks.RemoveLast();
+  spare_ = blocks_.RemoveLast();
 #ifdef DEBUG
   v8::ImplementationUtilities::ZapHandleRange(
-      spare,
-      &spare[kHandleBlockSize]);
+      spare_,
+      &spare_[kHandleBlockSize]);
 #endif
 }
 
diff --git a/src/arguments.h b/src/arguments.h
index 80f9006..d2f1bfc 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -45,6 +45,9 @@
 
 class Arguments BASE_EMBEDDED {
  public:
+  Arguments(int length, Object** arguments)
+      : length_(length), arguments_(arguments) { }
+
   Object*& operator[] (int index) {
     ASSERT(0 <= index && index < length_);
     return arguments_[-index];
@@ -61,11 +64,34 @@
   // Get the total number of arguments including the receiver.
   int length() const { return length_; }
 
+  Object** arguments() { return arguments_; }
+
  private:
   int length_;
   Object** arguments_;
 };
 
+
+// Custom arguments replicate a small segment of stack that can be
+// accessed through an Arguments object the same way the actual stack
+// can.
+class CustomArguments : public Relocatable {
+ public:
+  inline CustomArguments(Object *data,
+                         JSObject *self,
+                         JSObject *holder) {
+    values_[3] = self;
+    values_[2] = holder;
+    values_[1] = Smi::FromInt(0);
+    values_[0] = data;
+  }
+  void IterateInstance(ObjectVisitor* v);
+  Object** end() { return values_ + 3; }
+ private:
+  Object* values_[4];
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARGUMENTS_H_
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index cd5a1bb..5417ed7 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -81,7 +81,13 @@
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 7e43f2e..d1df08c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -645,8 +645,8 @@
     str(src, MemOperand(sp, 4, NegPreIndex), cond);
   }
 
-  void pop(Register dst) {
-    ldr(dst, MemOperand(sp, 4, PostIndex), al);
+  void pop(Register dst, Condition cond = al) {
+    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
   }
 
   void pop() {
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index cdea1cb..d7afb37 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -44,15 +44,379 @@
   __ str(r1, MemOperand(ip, 0));
 
   // The actual argument count has already been loaded into register
-  // r0, but JumpToBuiltin expects r0 to contain the number of
+  // r0, but JumpToRuntime expects r0 to contain the number of
   // arguments including the receiver.
   __ add(r0, r0, Operand(1));
-  __ JumpToBuiltin(ExternalReference(id));
+  __ JumpToRuntime(ExternalReference(id));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+
+  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ ldr(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ ldr(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the handling of loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ ldr(scratch1, FieldMemOperand(array_function,
+                                   JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3,  Operand(0));
+  __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, MemOperand(result, JSArray::kSize));
+  __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  __ mov(scratch3,  Operand(initial_capacity));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
+  }
+}
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ ldr(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ tst(array_size, array_size);
+  __ b(nz, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size / kPointerSize,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ mov(elements_array_end,
+         Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
+  __ add(elements_array_end,
+         elements_array_end,
+         Operand(array_size, ASR, kSmiTagSize));
+  __ AllocateInNewSpace(elements_array_end,
+                        result,
+                        scratch1,
+                        scratch2,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ add(elements_array_storage, result, Operand(JSArray::kSize));
+  __ str(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ and_(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
+  // Convert array_size from smi to value.
+  __ mov(array_size,
+         Operand(array_size, ASR, kSmiTagSize));
+  __ tst(array_size, array_size);
+  // Length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0 and the size of the JSArray for non-empty
+  // JSArrays. The length of a FixedArray is not stored as a smi.
+  __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ str(array_size,
+         MemOperand(elements_array_storage, kPointerSize, PostIndex));
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: size of elements array
+  __ add(elements_array_end,
+         elements_array_storage,
+         Operand(array_size, LSL, kPointerSizeLog2));
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ str(scratch1,
+           MemOperand(elements_array_storage, kPointerSize, PostIndex));
+    __ bind(&entry);
+    __ cmp(elements_array_storage, elements_array_end);
+    __ b(lt, &loop);
+  }
+}
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   r0: argc
+//   r1: constructor (built-in Array function)
+//   lr: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in r1 needs to be preserved for
+// entering the generic code. In both cases argc in r0 needs to be preserved.
+// Both registers are preserved by this code, so there is no need to
+// differentiate between a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments or one.
+  __ cmp(r0, Operand(0));
+  __ b(ne, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       r1,
+                       r2,
+                       r3,
+                       r4,
+                       r5,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
+  // Setup return value, remove receiver from stack and return.
+  __ mov(r0, r2);
+  __ add(sp, sp, Operand(kPointerSize));
+  __ Jump(lr);
+
+  // Check for one argument. Bail out if argument is not smi or if it is
+  // negative.
+  __ bind(&argc_one_or_more);
+  __ cmp(r0, Operand(1));
+  __ b(ne, &argc_two_or_more);
+  ASSERT(kSmiTag == 0);
+  __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
+  __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
+  __ b(ne, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  ASSERT(kSmiTag == 0);
+  __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+  __ b(ge, call_generic_code);
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
+  // Setup return value, remove receiver and argument from stack and return.
+  __ mov(r0, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Jump(lr);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ mov(r2, Operand(r0, LSL, kSmiTagSize));  // Convert argc to a smi.
+
+  // r0: argc
+  // r1: constructor
+  // r2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  r1,
+                  r2,
+                  r3,
+                  r4,
+                  r5,
+                  r6,
+                  r7,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store filling it backwards. Note:
+  // elements_array_end points after the backing store; therefore PreIndex is
+  // used when filling the backing store.
+  // r0: argc
+  // r3: JSArray
+  // r4: elements_array storage start (untagged)
+  // r5: elements_array_end (untagged)
+  // sp[0]: last argument
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ cmp(r4, r5);
+  __ b(lt, &loop);
+
+  // Remove caller arguments and receiver from the stack, setup return value and
+  // return.
+  // r0: argc
+  // r3: JSArray
+  // sp[0]: receiver
+  __ add(sp, sp, Operand(kPointerSize));
+  __ mov(r0, r3);
+  __ Jump(lr);
 }
 
 
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  // Just jump to the generic array code.
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
   Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
   Handle<Code> array_code(code);
   __ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -60,7 +424,34 @@
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  // Just jump to the generic construct code.
+  // ----------- S t a t e -------------
+  //  -- r0     : number of arguments
+  //  -- r1     : constructor function
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function which
+    // always has a map.
+    GenerateLoadArrayFunction(masm, r2);
+    __ cmp(r1, r2);
+    __ Assert(eq, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ tst(r2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function");
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ Assert(eq, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
   Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
   Handle<Code> generic_construct_stub(code);
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -149,7 +540,7 @@
     // r2: initial map
     // r7: undefined
     __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
+    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
 
     // Allocated the JSObject, now initialize the fields. Map is set to initial
     // map and properties and elements are set to empty fixed array.
@@ -220,12 +611,12 @@
     // r5: start of next object
     // r7: undefined
     __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateObjectInNewSpace(r0,
-                                r5,
-                                r6,
-                                r2,
-                                &undo_allocation,
-                                RESULT_CONTAINS_TOP);
+    __ AllocateInNewSpace(r0,
+                          r5,
+                          r6,
+                          r2,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
     // Initialize the FixedArray.
     // r1: constructor
diff --git a/src/arm/cfg-arm.cc b/src/arm/cfg-arm.cc
deleted file mode 100644
index e0e563c..0000000
--- a/src/arm/cfg-arm.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-arm.h"  // Include after codegen-inl.h.
-#include "macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmt(masm, "[ InstructionBlock");
-    for (int i = 0, len = instructions_.length(); i < len; i++) {
-      // If the location of the current instruction is a temp, then the
-      // instruction cannot be in tail position in the block.  Allocate the
-      // temp based on peeking ahead to the next instruction.
-      Instruction* instr = instructions_[i];
-      Location* loc = instr->location();
-      if (loc->is_temporary()) {
-        instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
-      }
-      instructions_[i]->Compile(masm);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmnt(masm, "[ EntryNode");
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-    __ add(fp, sp, Operand(2 * kPointerSize));
-    int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
-    if (count > 0) {
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      for (int i = 0; i < count; i++) {
-        __ push(ip);
-      }
-    }
-    if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
-    }
-    if (FLAG_check_stack) {
-      StackCheckStub stub;
-      __ CallStub(&stub);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Comment cmnt(masm, "[ ExitNode");
-  if (FLAG_trace) {
-    __ push(r0);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-  __ add(sp, sp, Operand((count + 1) * kPointerSize));
-  __ Jump(lr);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
-  // The key should not be on the stack---if it is a compiler-generated
-  // temporary it is in the accumulator.
-  ASSERT(!key()->is_on_stack());
-
-  Comment cmnt(masm, "[ Load from Property");
-  // If the key is known at compile-time we may be able to use a load IC.
-  bool is_keyed_load = true;
-  if (key()->is_constant()) {
-    // Still use the keyed load IC if the key can be parsed as an integer so
-    // we will get into the case that handles [] on string objects.
-    Handle<Object> key_val = Constant::cast(key())->handle();
-    uint32_t ignored;
-    if (key_val->IsSymbol() &&
-        !String::cast(*key_val)->AsArrayIndex(&ignored)) {
-      is_keyed_load = false;
-    }
-  }
-
-  if (!object()->is_on_stack()) object()->Push(masm);
-
-  if (is_keyed_load) {
-    key()->Push(masm);
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // Discard key and receiver.
-    __ add(sp, sp, Operand(2 * kPointerSize));
-  } else {
-    key()->Get(masm, r2);
-    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ pop();  // Discard receiver.
-  }
-  location()->Set(masm, r0);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
-  // The right-hand value should not be on the stack---if it is a
-  // compiler-generated temporary it is in the accumulator.
-  ASSERT(!right()->is_on_stack());
-
-  Comment cmnt(masm, "[ BinaryOpInstr");
-  // We can overwrite one of the operands if it is a temporary.
-  OverwriteMode mode = NO_OVERWRITE;
-  if (left()->is_temporary()) {
-    mode = OVERWRITE_LEFT;
-  } else if (right()->is_temporary()) {
-    mode = OVERWRITE_RIGHT;
-  }
-
-  // Move left to r1 and right to r0.
-  left()->Get(masm, r1);
-  right()->Get(masm, r0);
-  GenericBinaryOpStub stub(op(), mode);
-  __ CallStub(&stub);
-  location()->Set(masm, r0);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
-  // The location should be 'Effect'.  As a side effect, move the value to
-  // the accumulator.
-  Comment cmnt(masm, "[ ReturnInstr");
-  value()->Get(masm, r0);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
-  __ mov(reg, Operand(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
-  __ mov(ip, Operand(handle_));
-  __ push(ip);
-}
-
-
-static MemOperand ToMemOperand(SlotLocation* loc) {
-  switch (loc->type()) {
-    case Slot::PARAMETER: {
-      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
-    }
-    case Slot::LOCAL: {
-      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      return MemOperand(fp, kOffset - loc->index() * kPointerSize);
-    }
-    default:
-      UNREACHABLE();
-      return MemOperand(r0);
-  }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ mov(ip, Operand(handle_));
-  __ str(ip, ToMemOperand(loc));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
-  __ ldr(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
-  __ str(reg, ToMemOperand(this));
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
-  __ ldr(ip, ToMemOperand(this));
-  __ push(ip);  // Push will not destroy ip.
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
-  // Double dispatch.
-  value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ ldr(ip, ToMemOperand(this));
-  __ str(ip, ToMemOperand(loc));
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(r0)) __ mov(reg, r0);
-      break;
-    case STACK:
-      __ pop(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(r0)) __ mov(r0, reg);
-      break;
-    case STACK:
-      __ push(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ push(r0);
-      break;
-    case STACK:
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
-  switch (where_) {
-    case ACCUMULATOR:
-      value->Get(masm, r0);
-    case STACK:
-      value->Push(masm);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ str(r0, ToMemOperand(loc));
-    case STACK:
-      __ pop(ip);
-      __ str(ip, ToMemOperand(loc));
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 477ea05..cdd32f3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1188,7 +1188,6 @@
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -2811,7 +2810,6 @@
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatementPosition(node);
 
   { Reference target(this, node->target());
     if (target.is_illegal()) {
@@ -2909,13 +2907,11 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Call");
 
+  Expression* function = node->expression();
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatementPosition(node);
   // Standard function call.
-
   // Check if the function is a variable or a property.
-  Expression* function = node->expression();
   Variable* var = function->AsVariableProxy()->AsVariable();
   Property* property = function->AsProperty();
 
@@ -2928,7 +2924,56 @@
   // is resolved in cache misses (this also holds for megamorphic calls).
   // ------------------------------------------------------------------------
 
-  if (var != NULL && !var->is_this() && var->is_global()) {
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    // Prepare stack for call to resolved function.
+    LoadAndSpill(function);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    frame_->EmitPush(r2);  // Slot for receiver
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Prepare stack for call to ResolvePossiblyDirectEval.
+    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+    frame_->EmitPush(r1);
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      frame_->EmitPush(r1);
+    } else {
+      frame_->EmitPush(r2);
+    }
+
+    // Resolve the call.
+    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up stack with the right values for the function and the receiver.
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+    __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    frame_->CallStub(&call_function, arg_count + 1);
+
+    __ ldr(cp, frame_->Context());
+    // Remove the function from the stack.
+    frame_->Drop();
+    frame_->EmitPush(r0);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
@@ -3053,72 +3098,12 @@
 }
 
 
-void CodeGenerator::VisitCallEval(CallEval* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ CallEval");
-
-  // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
-  // the function we need to call and the receiver of the call.
-  // Then we call the resolved function using the given arguments.
-
-  ZoneList<Expression*>* args = node->arguments();
-  Expression* function = node->expression();
-
-  CodeForStatementPosition(node);
-
-  // Prepare stack for call to resolved function.
-  LoadAndSpill(function);
-  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(r2);  // Slot for receiver
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    LoadAndSpill(args->at(i));
-  }
-
-  // Prepare stack for call to ResolvePossiblyDirectEval.
-  __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
-  frame_->EmitPush(r1);
-  if (arg_count > 0) {
-    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-    frame_->EmitPush(r1);
-  } else {
-    frame_->EmitPush(r2);
-  }
-
-  // Resolve the call.
-  frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
-  // Touch up stack with the right values for the function and the receiver.
-  __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
-  __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
-  __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
-  // Call the function.
-  CodeForSourcePosition(node->position());
-
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop);
-  frame_->CallStub(&call_function, arg_count + 1);
-
-  __ ldr(cp, frame_->Context());
-  // Remove the function from the stack.
-  frame_->Drop();
-  frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
 void CodeGenerator::VisitCallNew(CallNew* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -4960,12 +4945,12 @@
     Register scratch2) {  // Another scratch register.
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
-  __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize,
-                              result,
-                              scratch1,
-                              scratch2,
-                              need_gc,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
 
   // Get heap number map and store it in the allocated object.
   __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
@@ -5076,11 +5061,14 @@
   // r5: Address of heap number for result.
   __ push(lr);   // For later.
   __ push(r5);   // Address of heap number that is answer.
+  __ AlignStack(0);
   // Call C routine that may not cause GC or other trouble.
   __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
   __ Call(r5);
+  __ pop(r4);  // Address of heap number.
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
   // Store answer in the overwritable heap number.
-  __ pop(r4);
 #if !defined(USE_ARM_EABI)
   // Double returned in fp coprocessor register 0 and 1, encoded as register
   // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index b28e965..1eb0932 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -370,7 +370,7 @@
   // information.
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(AstNode* node);
+  void CodeForStatementPosition(Statement* node);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6dd9b8f..45c6540 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -291,27 +291,8 @@
 
   // Align the stack at this point.  After this point we have 5 pushes,
   // so in fact we have to unalign here!  See also the assert on the
-  // alignment immediately below.
-#if defined(V8_HOST_ARCH_ARM)
-  // Running on the real platform. Use the alignment as mandated by the local
-  // environment.
-  // Note: This will break if we ever start generating snapshots on one ARM
-  // platform for another ARM platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else  // defined(V8_HOST_ARCH_ARM)
-  // If we are using the simulator then we should always align to the expected
-  // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
-#endif  // defined(V8_HOST_ARCH_ARM)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    mov(r7, Operand(Smi::FromInt(0)));
-    tst(sp, Operand(activation_frame_alignment - 1));
-    push(r7, eq);  // Conditional push instruction.
-  }
+  // alignment in AlignStack.
+  AlignStack(1);
 
   // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -343,6 +324,30 @@
 }
 
 
+void MacroAssembler::AlignStack(int offset) {
+#if defined(V8_HOST_ARCH_ARM)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one ARM
+  // platform for another ARM platform with a different alignment.
+  int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else  // defined(V8_HOST_ARCH_ARM)
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so we will always align at
+  // this point here.
+  int activation_frame_alignment = 2 * kPointerSize;
+#endif  // defined(V8_HOST_ARCH_ARM)
+  if (activation_frame_alignment != kPointerSize) {
+    // This code needs to be made more general if this assert doesn't hold.
+    ASSERT(activation_frame_alignment == 2 * kPointerSize);
+    mov(r7, Operand(Smi::FromInt(0)));
+    tst(sp, Operand(activation_frame_alignment - offset));
+    push(r7, eq);  // Conditional push instruction.
+  }
+}
+
+
 void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
@@ -763,12 +768,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
-                                              Register result,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(scratch1));
   ASSERT(!scratch1.is(scratch2));
 
@@ -813,12 +818,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
-                                              Register result,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(scratch1));
   ASSERT(!scratch1.is(scratch2));
 
@@ -1001,11 +1006,11 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   mov(r0, Operand(num_arguments));
-  JumpToBuiltin(ext);
+  JumpToRuntime(ext);
 }
 
 
-void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
 #if defined(__thumb__)
   // Thumb mode builtin.
   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1046,7 +1051,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
@@ -1064,7 +1068,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
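
The conditional push in AlignStack above is compact: tst ANDs sp with
(alignment - offset) and sets the Z flag when the result is zero, and
push(r7, eq) executes only in that case. Below is a rough host-side C++
sketch of the same predicate; the constants mirror the 32-bit ARM port
(4-byte pointers, 8-byte activation frame alignment) and the names are
illustrative, not V8's.

  #include <cstdint>

  // Illustrative constants for the 32-bit ARM port.
  const uintptr_t kPointerSize = 4;
  const uintptr_t kFrameAlignment = 2 * kPointerSize;  // 8 bytes

  // True exactly when the generated "tst sp, (alignment - offset)" would
  // set the Z flag, i.e. when the conditional push of the Smi zero fires.
  bool FillerWordNeeded(uintptr_t sp, uintptr_t offset) {
    return (sp & (kFrameAlignment - offset)) == 0;
  }

Pushing a Smi zero, rather than an arbitrary word, keeps the padding slot a
valid tagged value, so the garbage collector can safely scan over it.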
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 03aa4d0..ee9d70d 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -96,6 +96,8 @@
   // Leave the current exit frame. Expects the return value in r0.
   void LeaveExitFrame(StackFrame::Type type);
 
+  // Align the stack by optionally pushing a Smi zero.
+  void AlignStack(int offset);
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
@@ -171,18 +173,18 @@
   // bytes). If the new space is exhausted control continues at the gc_required
   // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
-  void AllocateObjectInNewSpace(int object_size,
-                                Register result,
-                                Register scratch1,
-                                Register scratch2,
-                                Label* gc_required,
-                                AllocationFlags flags);
-  void AllocateObjectInNewSpace(Register object_size,
-                                Register result,
-                                Register scratch1,
-                                Register scratch2,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. The caller must make sure that no pointers
@@ -257,14 +259,14 @@
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToBuiltin, but also takes care of passing the number
+  // Like JumpToRuntime, but also takes care of passing the number
   // of parameters.
   void TailCallRuntime(const ExternalReference& ext,
                        int num_arguments,
                        int result_size);
 
-  // Jump to the builtin routine.
-  void JumpToBuiltin(const ExternalReference& builtin);
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& builtin);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -329,8 +331,16 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register r1.
+  // 2. Fetches the builtin's code object and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump to or
+  //    call the builtin directly.  If the code object has not yet been
+  //    created, it returns the builtin code object for IllegalFunction and
+  //    sets the output parameter "resolved" to false.  Code that uses the
+  //    return value should then add the address and the builtin name to the
+  //    list of fixups called unresolved_, which the bootstrapper fixes up.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
   // Activation support.
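
The AllocateInNewSpace contract documented above (allocate object_size
bytes, return the object in result, continue at gc_required on exhaustion)
is bump-pointer allocation. A minimal C++ sketch of the idea, assuming a
top/limit pair for new space; this stands in for the generated code and is
not V8's implementation:

  #include <cstddef>

  struct NewSpace {
    char* top;    // next free byte
    char* limit;  // end of the space
  };

  // Returns the start of the allocation, or nullptr to signal the
  // "gc_required" slow path taken by the generated code.
  char* AllocateInNewSpace(NewSpace* space, size_t object_size) {
    char* result = space->top;
    if (space->limit - result < static_cast<ptrdiff_t>(object_size)) {
      return nullptr;  // new space exhausted: caller must collect garbage
    }
    space->top = result + object_size;
    return result;
  }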
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 70dfcd2..22bec82 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -409,7 +409,7 @@
 
 
 Simulator::Simulator() {
-  ASSERT(initialized_);
+  Initialize();
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -501,6 +501,7 @@
 
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current() {
+  Initialize();
   Simulator* sim = reinterpret_cast<Simulator*>(
       v8::internal::Thread::GetThreadLocal(simulator_key));
   if (sim == NULL) {
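
Both simulator changes above replace "assert that Initialize() already ran"
with "call Initialize() on entry", which only works because initialization
is idempotent. A condensed sketch of the pattern, using standard C++
thread-local storage in place of V8's Thread::GetThreadLocal; the flag
check is shown without locking, and the details are illustrative:

  class Simulator {
   public:
    // Idempotent one-time setup: every entry point may call it freely.
    static void Initialize() {
      if (initialized_) return;
      initialized_ = true;
      // ... create the thread-local key, decoding tables, etc.
    }

    // Entry points initialize first instead of asserting prior setup.
    static Simulator* current() {
      Initialize();
      if (current_ == nullptr) current_ = new Simulator();
      return current_;
    }

   private:
    Simulator() { Initialize(); }
    static bool initialized_;
    static thread_local Simulator* current_;
  };

  bool Simulator::initialized_ = false;
  thread_local Simulator* Simulator::current_ = nullptr;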
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3917d6a..ff6bbf4 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -36,18 +36,23 @@
 #ifndef V8_ARM_SIMULATOR_ARM_H_
 #define V8_ARM_SIMULATOR_ARM_H_
 
+#include "allocation.h"
+
 #if defined(__arm__)
 
 // When running without a simulator we call the entry directly.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   (entry(p0, p1, p2, p3, p4))
 
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (reinterpret_cast<uintptr_t>(this) - limit)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on arm uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
 
 
 // Call the generated regexp code directly. The entry function pointer should
@@ -64,12 +69,6 @@
       assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
                                                  p0, p1, p2, p3, p4))
 
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (assembler::arm::Simulator::current()->StackLimit())
-
-
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
   assembler::arm::Simulator::current()->Call( \
     FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
@@ -219,6 +218,20 @@
 
 } }  // namespace assembler::arm
 
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  Setting the c_limit to indicate a very small
+// stack will not cause stack overflow errors, since the simulator ignores it.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return assembler::arm::Simulator::current()->StackLimit();
+  }
+};
+
+
 #endif  // defined(__arm__)
 
 #endif  // V8_ARM_SIMULATOR_ARM_H_
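
Replacing the GENERATED_CODE_STACK_LIMIT macro with the SimulatorStack
class has a practical benefit visible in the two definitions above: the old
macro depended on taking the address of "this", so it could only be
expanded inside a C++ method, while the new static function takes the C
stack position as an explicit argument. A hypothetical caller, just to show
the shape of the new API (the 256 KB headroom is an arbitrary example):

  #include <cstdint>

  void UpdateStackGuard() {
    int dummy;  // address of a local approximates the current C stack depth
    uintptr_t c_limit = reinterpret_cast<uintptr_t>(&dummy) - 256 * 1024;
    // The same call works whether the native definition (returns c_limit
    // unchanged) or the simulator definition (ignores c_limit) was selected.
    uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(c_limit);
    // ... install js_limit as the stack-overflow threshold for JS code.
    (void) js_limit;
  }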
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9e44cfa..8282655 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1390,12 +1390,12 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateObjectInNewSpace(r3,
-                              r4,
-                              r5,
-                              r6,
-                              &generic_stub_call,
-                              NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(r3,
+                        r4,
+                        r5,
+                        r6,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
diff --git a/src/array.js b/src/array.js
index eb69f97..f8e63d0 100644
--- a/src/array.js
+++ b/src/array.js
@@ -709,6 +709,8 @@
     QuickSort(a, high_start, to);
   }
 
+  var length;
+
   // Copies elements in the range 0..length from obj's prototype chain
   // to obj itself, if obj has holes. Returns one more than the maximal index
   // of a prototype property.
@@ -826,7 +828,7 @@
     return first_undefined;
   }
 
-  var length = ToUint32(this.length);
+  length = ToUint32(this.length);
   if (length < 2) return this;
 
   var is_array = IS_ARRAY(this);
diff --git a/src/assembler.h b/src/assembler.h
index 827389a..323e06a 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -191,6 +191,7 @@
   INLINE(Address target_address());
   INLINE(void set_target_address(Address target));
   INLINE(Object* target_object());
+  INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
 
diff --git a/src/ast.cc b/src/ast.cc
index 2b60742..692bec0 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -40,7 +40,6 @@
 ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
 Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
 Call Call::sentinel_(NULL, NULL, 0);
-CallEval CallEval::sentinel_(NULL, NULL, 0);
 
 
 // ----------------------------------------------------------------------------
diff --git a/src/ast.h b/src/ast.h
index ea83712..6a1cdf5 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -85,7 +85,6 @@
   V(Throw)                                      \
   V(Property)                                   \
   V(Call)                                       \
-  V(CallEval)                                   \
   V(CallNew)                                    \
   V(CallRuntime)                                \
   V(UnaryOperation)                             \
@@ -116,7 +115,6 @@
 
 class AstNode: public ZoneObject {
  public:
-  AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
   virtual ~AstNode() { }
   virtual void Accept(AstVisitor* v) = 0;
 
@@ -140,6 +138,17 @@
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
   virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
   virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
+};
+
+
+class Statement: public AstNode {
+ public:
+  Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+
+  virtual Statement* AsStatement()  { return this; }
+  virtual ReturnStatement* AsReturnStatement() { return NULL; }
+
+  bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
   int statement_pos() const { return statement_pos_; }
@@ -149,15 +158,6 @@
 };
 
 
-class Statement: public AstNode {
- public:
-  virtual Statement* AsStatement()  { return this; }
-  virtual ReturnStatement* AsReturnStatement() { return NULL; }
-
-  bool IsEmpty() { return AsEmptyStatement() != NULL; }
-};
-
-
 class Expression: public AstNode {
  public:
   virtual Expression* AsExpression()  { return this; }
@@ -954,12 +954,8 @@
 
 class Call: public Expression {
  public:
-  Call(Expression* expression,
-       ZoneList<Expression*>* arguments,
-       int pos)
-      : expression_(expression),
-        arguments_(arguments),
-        pos_(pos) { }
+  Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+      : expression_(expression), arguments_(arguments), pos_(pos) { }
 
   virtual void Accept(AstVisitor* v);
 
@@ -981,30 +977,21 @@
 };
 
 
-class CallNew: public Call {
+class CallNew: public Expression {
  public:
   CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : Call(expression, arguments, pos) { }
-
-  virtual void Accept(AstVisitor* v);
-};
-
-
-// The CallEval class represents a call of the form 'eval(...)' where eval
-// cannot be seen to be overwritten at compile time. It is potentially a
-// direct (i.e. not aliased) eval call. The real nature of the call is
-// determined at runtime.
-class CallEval: public Call {
- public:
-  CallEval(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : Call(expression, arguments, pos) { }
+      : expression_(expression), arguments_(arguments), pos_(pos) { }
 
   virtual void Accept(AstVisitor* v);
 
-  static CallEval* sentinel() { return &sentinel_; }
+  Expression* expression() const { return expression_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  int position() { return pos_; }
 
  private:
-  static CallEval sentinel_;
+  Expression* expression_;
+  ZoneList<Expression*>* arguments_;
+  int pos_;
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5f38485..43aa1a3 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -201,20 +201,13 @@
     }
     Code* code = Code::cast(code_[i]);
     Address pc = code->instruction_start() + pc_[i];
-    bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+    RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
     bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
-
     if (use_code_object) {
-      if (is_pc_relative) {
-        Assembler::set_target_address_at(
-            pc, reinterpret_cast<Address>(f->code()));
-      } else {
-        *reinterpret_cast<Object**>(pc) = f->code();
-      }
+      target.set_target_object(f->code());
     } else {
-      Assembler::set_target_address_at(pc, f->code()->instruction_start());
+      target.set_target_address(f->code()->instruction_start());
     }
-
     LOG(StringEvent("resolved", name));
   }
   Clear();
@@ -1586,6 +1579,12 @@
 }
 
 
+// Called when the top-level V8 mutex is destroyed.
+void Bootstrapper::FreeThreadResources() {
+  ASSERT(Genesis::current() == NULL);
+}
+
+
 // Reserve space for statics needing saving and restoring.
 int Genesis::ArchiveSpacePerThread() {
   return sizeof(current_);
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 0d743e3..15fc88d 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -66,14 +66,14 @@
   static bool IsActive();
 
   // Encoding/decoding support for fixup flags.
-  class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
-  class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {};
-  class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {};
+  class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
+  class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};
 
   // Support for thread preemption.
   static int ArchiveSpacePerThread();
   static char* ArchiveState(char* to);
   static char* RestoreState(char* from);
+  static void FreeThreadResources();
 };
 
 }}  // namespace v8::internal
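
With the IsPCRelative bit gone, the fixup word now packs UseCodeObject into
bit 0 and the argument count into the remaining 31 bits. A small sketch of
a round trip through the BitField helpers, matching the encode calls in
macro-assembler-arm.cc above and the decode call in bootstrapper.cc, and
assuming BitField's usual decode counterpart for the arguments count:

  uint32_t flags =
      Bootstrapper::FixupFlagsArgumentsCount::encode(2) |
      Bootstrapper::FixupFlagsUseCodeObject::encode(true);

  bool use_code_object =
      Bootstrapper::FixupFlagsUseCodeObject::decode(flags);   // true
  uint32_t argc =
      Bootstrapper::FixupFlagsArgumentsCount::decode(flags);  // 2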
diff --git a/src/builtins.cc b/src/builtins.cc
index 5fe4ba9..afb5427 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -172,7 +172,9 @@
   }
 
   // Optimize the case where there are no parameters passed.
-  if (args.length() == 1) return array->Initialize(4);
+  if (args.length() == 1) {
+    return array->Initialize(JSArray::kPreallocatedArrayElements);
+  }
 
   // Take the arguments as elements.
   int number_of_elements = args.length() - 1;
diff --git a/src/cfg.cc b/src/cfg.cc
deleted file mode 100644
index d2dff52..0000000
--- a/src/cfg.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "bootstrapper.h"
-#include "cfg.h"
-#include "scopeinfo.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-CfgGlobals* CfgGlobals::top_ = NULL;
-
-
-CfgGlobals::CfgGlobals(FunctionLiteral* fun)
-    : global_fun_(fun),
-      global_exit_(new ExitNode()),
-      nowhere_(new Nowhere()),
-#ifdef DEBUG
-      node_counter_(0),
-      temp_counter_(0),
-#endif
-      previous_(top_) {
-  top_ = this;
-}
-
-
-#define BAILOUT(reason)                         \
-  do { return NULL; } while (false)
-
-Cfg* Cfg::Build() {
-  FunctionLiteral* fun = CfgGlobals::current()->fun();
-  if (fun->scope()->num_heap_slots() > 0) {
-    BAILOUT("function has context slots");
-  }
-  if (fun->scope()->num_stack_slots() > kBitsPerPointer) {
-    BAILOUT("function has too many locals");
-  }
-  if (fun->scope()->num_parameters() > kBitsPerPointer - 1) {
-    BAILOUT("function has too many parameters");
-  }
-  if (fun->scope()->arguments() != NULL) {
-    BAILOUT("function uses .arguments");
-  }
-
-  ZoneList<Statement*>* body = fun->body();
-  if (body->is_empty()) {
-    BAILOUT("empty function body");
-  }
-
-  StatementCfgBuilder builder;
-  builder.VisitStatements(body);
-  Cfg* graph = builder.graph();
-  if (graph == NULL) {
-    BAILOUT("unsupported statement type");
-  }
-  if (graph->is_empty()) {
-    BAILOUT("function body produces empty cfg");
-  }
-  if (graph->has_exit()) {
-    BAILOUT("control path without explicit return");
-  }
-  graph->PrependEntryNode();
-  return graph;
-}
-
-#undef BAILOUT
-
-
-void Cfg::PrependEntryNode() {
-  ASSERT(!is_empty());
-  entry_ = new EntryNode(InstructionBlock::cast(entry()));
-}
-
-
-void Cfg::Append(Instruction* instr) {
-  ASSERT(is_empty() || has_exit());
-  if (is_empty()) {
-    entry_ = exit_ = new InstructionBlock();
-  }
-  InstructionBlock::cast(exit_)->Append(instr);
-}
-
-
-void Cfg::AppendReturnInstruction(Value* value) {
-  Append(new ReturnInstr(value));
-  ExitNode* global_exit = CfgGlobals::current()->exit();
-  InstructionBlock::cast(exit_)->set_successor(global_exit);
-  exit_ = NULL;
-}
-
-
-void Cfg::Concatenate(Cfg* other) {
-  ASSERT(is_empty() || has_exit());
-  if (other->is_empty()) return;
-
-  if (is_empty()) {
-    entry_ = other->entry();
-    exit_ = other->exit();
-  } else {
-    // We have a pair of nonempty fragments and this has an available exit.
-    // Destructively glue the fragments together.
-    InstructionBlock* first = InstructionBlock::cast(exit_);
-    InstructionBlock* second = InstructionBlock::cast(other->entry());
-    first->instructions()->AddAll(*second->instructions());
-    if (second->successor() != NULL) {
-      first->set_successor(second->successor());
-      exit_ = other->exit();
-    }
-  }
-}
-
-
-void InstructionBlock::Unmark() {
-  if (is_marked_) {
-    is_marked_ = false;
-    successor_->Unmark();
-  }
-}
-
-
-void EntryNode::Unmark() {
-  if (is_marked_) {
-    is_marked_ = false;
-    successor_->Unmark();
-  }
-}
-
-
-void ExitNode::Unmark() {
-  is_marked_ = false;
-}
-
-
-Handle<Code> Cfg::Compile(Handle<Script> script) {
-  const int kInitialBufferSize = 4 * KB;
-  MacroAssembler* masm = new MacroAssembler(NULL, kInitialBufferSize);
-  entry()->Compile(masm);
-  entry()->Unmark();
-  CodeDesc desc;
-  masm->GetCode(&desc);
-  FunctionLiteral* fun = CfgGlobals::current()->fun();
-  ZoneScopeInfo info(fun->scope());
-  InLoopFlag in_loop = fun->loop_nesting() ? IN_LOOP : NOT_IN_LOOP;
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> code = Factory::NewCode(desc, &info, flags, masm->CodeObject());
-
-  // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(*code, masm);
-
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code) {
-    // Print the source code if available.
-    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-      PrintF("--- Raw source ---\n");
-      StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(fun->start_position());
-      // fun->end_position() points to the last character in the
-      // stream. We need to compensate by adding one to calculate the
-      // length.
-      int source_len = fun->end_position() - fun->start_position() + 1;
-      for (int i = 0; i < source_len; i++) {
-        if (stream.has_more()) PrintF("%c", stream.GetNext());
-      }
-      PrintF("\n\n");
-    }
-    PrintF("--- Code ---\n");
-    code->Disassemble(*fun->name()->ToCString());
-  }
-#endif
-
-  return code;
-}
-
-
-void ZeroOperandInstruction::FastAllocate(TempLocation* temp) {
-  temp->set_where(TempLocation::STACK);
-}
-
-
-void OneOperandInstruction::FastAllocate(TempLocation* temp) {
-  temp->set_where((temp == value_)
-                  ? TempLocation::ACCUMULATOR
-                  : TempLocation::STACK);
-}
-
-
-void TwoOperandInstruction::FastAllocate(TempLocation* temp) {
-  temp->set_where((temp == value0_ || temp == value1_)
-                  ? TempLocation::ACCUMULATOR
-                  : TempLocation::STACK);
-}
-
-
-void PositionInstr::Compile(MacroAssembler* masm) {
-  if (FLAG_debug_info && pos_ != RelocInfo::kNoPosition) {
-    masm->RecordStatementPosition(pos_);
-    masm->RecordPosition(pos_);
-  }
-}
-
-
-void MoveInstr::Compile(MacroAssembler* masm) {
-  location()->Move(masm, value());
-}
-
-
-// The expression builder should not be used for declarations or statements.
-void ExpressionCfgBuilder::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
-}
-
-#define DEFINE_VISIT(type)                                              \
-  void ExpressionCfgBuilder::Visit##type(type* stmt) { UNREACHABLE(); }
-STATEMENT_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-// Macros (temporarily) handling unsupported expression types.
-#define BAILOUT(reason)                         \
-  do {                                          \
-    graph_ = NULL;                              \
-    return;                                     \
-  } while (false)
-
-void ExpressionCfgBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
-  BAILOUT("FunctionLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitConditional(Conditional* expr) {
-  BAILOUT("Conditional");
-}
-
-
-void ExpressionCfgBuilder::VisitSlot(Slot* expr) {
-  BAILOUT("Slot");
-}
-
-
-void ExpressionCfgBuilder::VisitVariableProxy(VariableProxy* expr) {
-  Expression* rewrite = expr->var()->rewrite();
-  if (rewrite == NULL || rewrite->AsSlot() == NULL) {
-    BAILOUT("unsupported variable (not a slot)");
-  }
-  Slot* slot = rewrite->AsSlot();
-  if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
-    BAILOUT("unsupported slot type (not a parameter or local)");
-  }
-  // Ignore the passed destination.
-  value_ = new SlotLocation(slot->type(), slot->index());
-}
-
-
-void ExpressionCfgBuilder::VisitLiteral(Literal* expr) {
-  // Ignore the passed destination.
-  value_ = new Constant(expr->handle());
-}
-
-
-void ExpressionCfgBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
-  BAILOUT("RegExpLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  BAILOUT("ObjectLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
-  BAILOUT("ArrayLiteral");
-}
-
-
-void ExpressionCfgBuilder::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  BAILOUT("CatchExtensionObject");
-}
-
-
-void ExpressionCfgBuilder::VisitAssignment(Assignment* expr) {
-  if (expr->op() != Token::ASSIGN && expr->op() != Token::INIT_VAR) {
-    BAILOUT("unsupported compound assignment");
-  }
-  Expression* lhs = expr->target();
-  if (lhs->AsProperty() != NULL) {
-    BAILOUT("unsupported property assignment");
-  }
-
-  Variable* var = lhs->AsVariableProxy()->AsVariable();
-  if (var == NULL) {
-    BAILOUT("unsupported invalid left-hand side");
-  }
-  if (var->is_global()) {
-    BAILOUT("unsupported global variable");
-  }
-  Slot* slot = var->slot();
-  ASSERT(slot != NULL);
-  if (slot->type() != Slot::PARAMETER && slot->type() != Slot::LOCAL) {
-    BAILOUT("unsupported slot lhs (not a parameter or local)");
-  }
-
-  // Parameter and local slot assignments.
-  ExpressionCfgBuilder builder;
-  SlotLocation* loc = new SlotLocation(slot->type(), slot->index());
-  builder.Build(expr->value(), loc);
-  if (builder.graph() == NULL) {
-    BAILOUT("unsupported expression in assignment");
-  }
-  // If the expression did not come back in the slot location, append
-  // a move to the CFG.
-  graph_ = builder.graph();
-  if (builder.value() != loc) {
-    graph()->Append(new MoveInstr(loc, builder.value()));
-  }
-  // Record the assignment.
-  assigned_vars_.AddElement(loc);
-  // Ignore the destination passed to us.
-  value_ = loc;
-}
-
-
-void ExpressionCfgBuilder::VisitThrow(Throw* expr) {
-  BAILOUT("Throw");
-}
-
-
-void ExpressionCfgBuilder::VisitProperty(Property* expr) {
-  ExpressionCfgBuilder object, key;
-  object.Build(expr->obj(), NULL);
-  if (object.graph() == NULL) {
-    BAILOUT("unsupported object subexpression in propload");
-  }
-  key.Build(expr->key(), NULL);
-  if (key.graph() == NULL) {
-    BAILOUT("unsupported key subexpression in propload");
-  }
-
-  if (destination_ == NULL) destination_ = new TempLocation();
-
-  graph_ = object.graph();
-  // Insert a move to a fresh temporary if the object value is in a slot
-  // that's assigned in the key.
-  Location* temp = NULL;
-  if (object.value()->is_slot() &&
-      key.assigned_vars()->Contains(SlotLocation::cast(object.value()))) {
-    temp = new TempLocation();
-    graph()->Append(new MoveInstr(temp, object.value()));
-  }
-  graph()->Concatenate(key.graph());
-  graph()->Append(new PropLoadInstr(destination_,
-                                    temp == NULL ? object.value() : temp,
-                                    key.value()));
-
-  assigned_vars_ = *object.assigned_vars();
-  assigned_vars()->Union(key.assigned_vars());
-
-  value_ = destination_;
-}
-
-
-void ExpressionCfgBuilder::VisitCall(Call* expr) {
-  BAILOUT("Call");
-}
-
-
-void ExpressionCfgBuilder::VisitCallEval(CallEval* expr) {
-  BAILOUT("CallEval");
-}
-
-
-void ExpressionCfgBuilder::VisitCallNew(CallNew* expr) {
-  BAILOUT("CallNew");
-}
-
-
-void ExpressionCfgBuilder::VisitCallRuntime(CallRuntime* expr) {
-  BAILOUT("CallRuntime");
-}
-
-
-void ExpressionCfgBuilder::VisitUnaryOperation(UnaryOperation* expr) {
-  BAILOUT("UnaryOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitCountOperation(CountOperation* expr) {
-  BAILOUT("CountOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitBinaryOperation(BinaryOperation* expr) {
-  Token::Value op = expr->op();
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-      BAILOUT("unsupported binary operation");
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      ExpressionCfgBuilder left, right;
-      left.Build(expr->left(), NULL);
-      if (left.graph() == NULL) {
-        BAILOUT("unsupported left subexpression in binop");
-      }
-      right.Build(expr->right(), NULL);
-      if (right.graph() == NULL) {
-        BAILOUT("unsupported right subexpression in binop");
-      }
-
-      if (destination_ == NULL) destination_ = new TempLocation();
-
-      graph_ = left.graph();
-      // Insert a move to a fresh temporary if the left value is in a
-      // slot that's assigned on the right.
-      Location* temp = NULL;
-      if (left.value()->is_slot() &&
-          right.assigned_vars()->Contains(SlotLocation::cast(left.value()))) {
-        temp = new TempLocation();
-        graph()->Append(new MoveInstr(temp, left.value()));
-      }
-      graph()->Concatenate(right.graph());
-      graph()->Append(new BinaryOpInstr(destination_, op,
-                                        temp == NULL ? left.value() : temp,
-                                        right.value()));
-
-      assigned_vars_ = *left.assigned_vars();
-      assigned_vars()->Union(right.assigned_vars());
-
-      value_ = destination_;
-      return;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void ExpressionCfgBuilder::VisitCompareOperation(CompareOperation* expr) {
-  BAILOUT("CompareOperation");
-}
-
-
-void ExpressionCfgBuilder::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
-}
-
-#undef BAILOUT
-
-
-// Macros (temporarily) handling unsupported statement types.
-#define BAILOUT(reason)                         \
-  do {                                          \
-    graph_ = NULL;                              \
-    return;                                     \
-  } while (false)
-
-#define CHECK_BAILOUT()                         \
-  if (graph() == NULL) { return; } else {}
-
-void StatementCfgBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0, len = stmts->length(); i < len; i++) {
-    Visit(stmts->at(i));
-    CHECK_BAILOUT();
-    if (!graph()->has_exit()) return;
-  }
-}
-
-
-// The statement builder should not be used for declarations or expressions.
-void StatementCfgBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); }
-
-#define DEFINE_VISIT(type)                                      \
-  void StatementCfgBuilder::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-
-void StatementCfgBuilder::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-
-void StatementCfgBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
-  ExpressionCfgBuilder builder;
-  builder.Build(stmt->expression(), CfgGlobals::current()->nowhere());
-  if (builder.graph() == NULL) {
-    BAILOUT("unsupported expression in expression statement");
-  }
-  graph()->Append(new PositionInstr(stmt->statement_pos()));
-  graph()->Concatenate(builder.graph());
-}
-
-
-void StatementCfgBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void StatementCfgBuilder::VisitIfStatement(IfStatement* stmt) {
-  BAILOUT("IfStatement");
-}
-
-
-void StatementCfgBuilder::VisitContinueStatement(ContinueStatement* stmt) {
-  BAILOUT("ContinueStatement");
-}
-
-
-void StatementCfgBuilder::VisitBreakStatement(BreakStatement* stmt) {
-  BAILOUT("BreakStatement");
-}
-
-
-void StatementCfgBuilder::VisitReturnStatement(ReturnStatement* stmt) {
-  ExpressionCfgBuilder builder;
-  builder.Build(stmt->expression(), NULL);
-  if (builder.graph() == NULL) {
-    BAILOUT("unsupported expression in return statement");
-  }
-
-  graph()->Append(new PositionInstr(stmt->statement_pos()));
-  graph()->Concatenate(builder.graph());
-  graph()->AppendReturnInstruction(builder.value());
-}
-
-
-void StatementCfgBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
-}
-
-
-void StatementCfgBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
-}
-
-
-void StatementCfgBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
-  BAILOUT("SwitchStatement");
-}
-
-
-void StatementCfgBuilder::VisitLoopStatement(LoopStatement* stmt) {
-  BAILOUT("LoopStatement");
-}
-
-
-void StatementCfgBuilder::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
-}
-
-
-void StatementCfgBuilder::VisitTryCatch(TryCatch* stmt) {
-  BAILOUT("TryCatch");
-}
-
-
-void StatementCfgBuilder::VisitTryFinally(TryFinally* stmt) {
-  BAILOUT("TryFinally");
-}
-
-
-void StatementCfgBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  BAILOUT("DebuggerStatement");
-}
-
-
-#ifdef DEBUG
-// CFG printing support (via depth-first, preorder block traversal).
-
-void Cfg::Print() {
-  entry_->Print();
-  entry_->Unmark();
-}
-
-
-void Constant::Print() {
-  PrintF("Constant ");
-  handle_->Print();
-}
-
-
-void Nowhere::Print() {
-  PrintF("Nowhere");
-}
-
-
-void SlotLocation::Print() {
-  PrintF("Slot ");
-  switch (type_) {
-    case Slot::PARAMETER:
-      PrintF("(PARAMETER, %d)", index_);
-      break;
-    case Slot::LOCAL:
-      PrintF("(LOCAL, %d)", index_);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Print() {
-  PrintF("Temp %d", number());
-}
-
-
-void OneOperandInstruction::Print() {
-  PrintF("(");
-  location()->Print();
-  PrintF(", ");
-  value_->Print();
-  PrintF(")");
-}
-
-
-void TwoOperandInstruction::Print() {
-  PrintF("(");
-  location()->Print();
-  PrintF(", ");
-  value0_->Print();
-  PrintF(", ");
-  value1_->Print();
-  PrintF(")");
-}
-
-
-void MoveInstr::Print() {
-  PrintF("Move              ");
-  OneOperandInstruction::Print();
-  PrintF("\n");
-}
-
-
-void PropLoadInstr::Print() {
-  PrintF("PropLoad          ");
-  TwoOperandInstruction::Print();
-  PrintF("\n");
-}
-
-
-void BinaryOpInstr::Print() {
-  switch (op()) {
-    case Token::OR:
-      // Two character operand.
-      PrintF("BinaryOp[OR]      ");
-      break;
-    case Token::AND:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Three character operands.
-      PrintF("BinaryOp[%s]     ", Token::Name(op()));
-      break;
-    case Token::COMMA:
-      // Five character operand.
-      PrintF("BinaryOp[COMMA]   ");
-      break;
-    case Token::BIT_OR:
-      // Six character operand.
-      PrintF("BinaryOp[BIT_OR]  ");
-      break;
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Seven character operands.
-      PrintF("BinaryOp[%s] ", Token::Name(op()));
-      break;
-    default:
-      UNREACHABLE();
-  }
-  TwoOperandInstruction::Print();
-  PrintF("\n");
-}
-
-
-void ReturnInstr::Print() {
-  PrintF("Return            ");
-  OneOperandInstruction::Print();
-  PrintF("\n");
-}
-
-
-void InstructionBlock::Print() {
-  if (!is_marked_) {
-    is_marked_ = true;
-    PrintF("L%d:\n", number());
-    for (int i = 0, len = instructions_.length(); i < len; i++) {
-      instructions_[i]->Print();
-    }
-    PrintF("Goto              L%d\n\n", successor_->number());
-    successor_->Print();
-  }
-}
-
-
-void EntryNode::Print() {
-  if (!is_marked_) {
-    is_marked_ = true;
-    successor_->Print();
-  }
-}
-
-
-void ExitNode::Print() {
-  if (!is_marked_) {
-    is_marked_ = true;
-    PrintF("L%d:\nExit\n\n", number());
-  }
-}
-
-#endif  // DEBUG
-
-} }  // namespace v8::internal
diff --git a/src/cfg.h b/src/cfg.h
deleted file mode 100644
index 0eb0f92..0000000
--- a/src/cfg.h
+++ /dev/null
@@ -1,871 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_CFG_H_
-#define V8_CFG_H_
-
-#include "ast.h"
-
-namespace v8 {
-namespace internal {
-
-class ExitNode;
-class Location;
-
-// Translate a source AST into a control-flow graph (CFG).  The CFG contains
-// single-entry, single-exit blocks of straight-line instructions and
-// administrative nodes.
-//
-// Instructions are described by the following grammar.
-//
-// <Instruction> ::=
-//     Move <Location> <Value>
-//   | PropLoad <Location> <Value> <Value>
-//   | BinaryOp <Location> Token::Value <Value> <Value>
-//   | Return Nowhere <Value>
-//   | Position <Int>
-//
-// Values are trivial expressions:
-//
-// <Value> ::= Constant | <Location>
-//
-// Locations are storable values ('lvalues').  They can be slots,
-// compiler-generated temporaries, or the special location 'Nowhere'
-// indicating that no value is needed.
-//
-// <Location> ::=
-//     SlotLocation Slot::Type <Index>
-//   | TempLocation
-//   | Nowhere
-
-
-// Administrative nodes: There are several types of 'administrative' nodes
-// that do not contain instructions and do not necessarily have a single
-// predecessor and a single successor.
-//
-// EntryNode: there is a distinguished entry node that has no predecessors
-// and a single successor.
-//
-// ExitNode: there is a distinguished exit node that has arbitrarily many
-// predecessors and no successor.
-//
-// JoinNode: join nodes have multiple predecessors and a single successor.
-//
-// BranchNode: branch nodes have a single predecessor and multiple
-// successors.
-
-
-// A convenient class to keep 'global' values when building a CFG.  Since
-// CFG construction can be invoked recursively, CFG globals are stacked.
-class CfgGlobals BASE_EMBEDDED {
- public:
-  explicit CfgGlobals(FunctionLiteral* fun);
-
-  ~CfgGlobals() { top_ = previous_; }
-
-  static CfgGlobals* current() {
-    ASSERT(top_ != NULL);
-    return top_;
-  }
-
-  // The function currently being compiled.
-  FunctionLiteral* fun() { return global_fun_; }
-
-  // The shared global exit node for all exits from the function.
-  ExitNode* exit() { return global_exit_; }
-
-  // A singleton.
-  Location* nowhere() { return nowhere_; }
-
-#ifdef DEBUG
-  int next_node_number() { return node_counter_++; }
-  int next_temp_number() { return temp_counter_++; }
-#endif
-
- private:
-  static CfgGlobals* top_;
-  FunctionLiteral* global_fun_;
-  ExitNode* global_exit_;
-  Location* nowhere_;
-
-#ifdef DEBUG
-  // Used to number nodes and temporaries when printing.
-  int node_counter_;
-  int temp_counter_;
-#endif
-
-  CfgGlobals* previous_;
-};
-
-
-class SlotLocation;
-
-// Values represent trivial source expressions: ones with no side effects
-// and that do not require code to be generated.
-class Value : public ZoneObject {
- public:
-  virtual ~Value() {}
-
-  // Predicates:
-
-  virtual bool is_temporary() { return false; }
-  virtual bool is_slot() { return false; }
-  virtual bool is_constant() { return false; }
-
-  // True if the value is a temporary allocated to the stack in
-  // fast-compilation mode.
-  virtual bool is_on_stack() { return false; }
-
-  // Support for fast-compilation mode:
-
-  // Move the value into a register.
-  virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
-  // Push the value on the stack.
-  virtual void Push(MacroAssembler* masm) = 0;
-
-  // Move the value into a slot location.
-  virtual void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) = 0;
-
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-};
-
-
-// A compile-time constant that appeared as a literal in the source AST.
-class Constant : public Value {
- public:
-  explicit Constant(Handle<Object> handle) : handle_(handle) {}
-
-  // Cast accessor.
-  static Constant* cast(Value* value) {
-    ASSERT(value->is_constant());
-    return reinterpret_cast<Constant*>(value);
-  }
-
-  // Accessors.
-  Handle<Object> handle() { return handle_; }
-
-  // Predicates.
-  bool is_constant() { return true; }
-
-  // Support for fast-compilation mode.
-  void Get(MacroAssembler* masm, Register reg);
-  void Push(MacroAssembler* masm);
-  void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Handle<Object> handle_;
-};
-
-
-// Locations are values that can be stored into ('lvalues').
-class Location : public Value {
- public:
-  virtual ~Location() {}
-
-  // Static factory function returning the singleton nowhere location.
-  static Location* Nowhere() {
-    return CfgGlobals::current()->nowhere();
-  }
-
-  // Support for fast-compilation mode:
-
-  // Assumes temporaries have been allocated.
-  virtual void Get(MacroAssembler* masm, Register reg) = 0;
-
-  // Store the value in a register to the location.  Assumes temporaries
-  // have been allocated.
-  virtual void Set(MacroAssembler* masm, Register reg) = 0;
-
-  // Assumes temporaries have been allocated, and if the value is a
-  // temporary it was not allocated to the stack.
-  virtual void Push(MacroAssembler* masm) = 0;
-
-  // Emit code to move a value into this location.
-  virtual void Move(MacroAssembler* masm, Value* value) = 0;
-
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-};
-
-
-// Nowhere is a special (singleton) location that indicates the value of a
-// computation is not needed (though its side effects are).
-class Nowhere : public Location {
- public:
-  // We should not try to emit code to read Nowhere.
-  void Get(MacroAssembler* masm, Register reg) { UNREACHABLE(); }
-  void Push(MacroAssembler* masm) { UNREACHABLE(); }
-  void MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { UNREACHABLE(); }
-
-  // Setting Nowhere is ignored.
-  void Set(MacroAssembler* masm, Register reg) {}
-  void Move(MacroAssembler* masm, Value* value) {}
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Nowhere() {}
-
-  friend class CfgGlobals;
-};
-
-
-// SlotLocations represent parameters and stack-allocated (i.e.,
-// non-context) local variables.
-class SlotLocation : public Location {
- public:
-  SlotLocation(Slot::Type type, int index) : type_(type), index_(index) {}
-
-  // Cast accessor.
-  static SlotLocation* cast(Value* value) {
-    ASSERT(value->is_slot());
-    return reinterpret_cast<SlotLocation*>(value);
-  }
-
-  // Accessors.
-  Slot::Type type() { return type_; }
-  int index() { return index_; }
-
-  // Predicates.
-  bool is_slot() { return true; }
-
-  // Support for fast-compilation mode.
-  void Get(MacroAssembler* masm, Register reg);
-  void Set(MacroAssembler* masm, Register reg);
-  void Push(MacroAssembler* masm);
-  void Move(MacroAssembler* masm, Value* value);
-  void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Slot::Type type_;
-  int index_;
-};
-
-
-// TempLocations represent compiler generated temporaries.  They are
-// allocated to registers or memory either before code generation (in the
-// optimized-for-speed compiler) or on the fly during code generation (in
-// the optimized-for-space compiler).
-class TempLocation : public Location {
- public:
-  // Fast-compilation mode allocation decisions.
-  enum Where {
-    NOT_ALLOCATED,  // Not yet allocated.
-    ACCUMULATOR,    // Allocated to the dedicated accumulator register.
-    STACK           //   "   "   "   "  stack.
-  };
-
-  TempLocation() : where_(NOT_ALLOCATED) {
-#ifdef DEBUG
-    number_ = -1;
-#endif
-  }
-
-  // Cast accessor.
-  static TempLocation* cast(Value* value) {
-    ASSERT(value->is_temporary());
-    return reinterpret_cast<TempLocation*>(value);
-  }
-
-  // Accessors.
-  Where where() { return where_; }
-  void set_where(Where where) {
-    ASSERT(where_ == TempLocation::NOT_ALLOCATED);
-    where_ = where;
-  }
-
-  // Predicates.
-  bool is_on_stack() { return where_ == STACK; }
-  bool is_temporary() { return true; }
-
-  // Support for fast-compilation mode.  Assume the temp has been allocated.
-  void Get(MacroAssembler* masm, Register reg);
-  void Set(MacroAssembler* masm, Register reg);
-  void Push(MacroAssembler* masm);
-  void Move(MacroAssembler* masm, Value* value);
-  void MoveToSlot(MacroAssembler* masm, SlotLocation* loc);
-
-#ifdef DEBUG
-  int number() {
-    if (number_ == -1) number_ = CfgGlobals::current()->next_temp_number();
-    return number_;
-  }
-
-  void Print();
-#endif
-
- private:
-  Where where_;
-
-#ifdef DEBUG
-  int number_;
-#endif
-};
-
-
-// Instructions are computations.  The represent non-trivial source
-// expressions: typically ones that have side effects and require code to
-// be generated.
-class Instruction : public ZoneObject {
- public:
-  // Accessors.
-  Location* location() { return location_; }
-  void set_location(Location* location) { location_ = location; }
-
-  // Support for fast-compilation mode:
-
-  // Emit code to perform the instruction.
-  virtual void Compile(MacroAssembler* masm) = 0;
-
-  // Allocate a temporary which is the result of the immediate predecessor
-  // instruction.  It is allocated to the accumulator register if it is used
-  // as an operand to this instruction, otherwise to the stack.
-  virtual void FastAllocate(TempLocation* temp) = 0;
-
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-
- protected:
-  // Every instruction has a location where its result is stored (which may
-  // be Nowhere).
-  explicit Instruction(Location* location) : location_(location) {}
-
-  virtual ~Instruction() {}
-
-  Location* location_;
-};
-
-
-// Base class of instructions that have no input operands.
-class ZeroOperandInstruction : public Instruction {
- public:
-  // Support for fast-compilation mode:
-  virtual void Compile(MacroAssembler* masm) = 0;
-  void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
-  // Printing support: print the operands (nothing).
-  virtual void Print() {}
-#endif
-
- protected:
-  explicit ZeroOperandInstruction(Location* loc) : Instruction(loc) {}
-};
-
-
-// Base class of instructions that have a single input operand.
-class OneOperandInstruction : public Instruction {
- public:
-  // Support for fast-compilation mode:
-  virtual void Compile(MacroAssembler* masm) = 0;
-  void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
-  // Printing support: print the operands.
-  virtual void Print();
-#endif
-
- protected:
-  OneOperandInstruction(Location* loc, Value* value)
-      : Instruction(loc), value_(value) {
-  }
-
-  Value* value_;
-};
-
-
-// Base class of instructions that have two input operands.
-class TwoOperandInstruction : public Instruction {
- public:
-  // Support for fast-compilation mode:
-  virtual void Compile(MacroAssembler* masm) = 0;
-  void FastAllocate(TempLocation* temp);
-
-#ifdef DEBUG
-  // Printing support: print the operands.
-  virtual void Print();
-#endif
-
- protected:
-  TwoOperandInstruction(Location* loc, Value* value0, Value* value1)
-      : Instruction(loc), value0_(value0), value1_(value1) {
-  }
-
-  Value* value0_;
-  Value* value1_;
-};
-
-
-// A phantom instruction that indicates the start of a statement.  It
-// causes the statement position to be recorded in the relocation
-// information but generates no code.
-class PositionInstr : public ZeroOperandInstruction {
- public:
-  explicit PositionInstr(int pos)
-      : ZeroOperandInstruction(CfgGlobals::current()->nowhere()), pos_(pos) {
-  }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-  // This should not be called.  The last instruction of the previous
-  // statement should not have a temporary as its location.
-  void FastAllocate(TempLocation* temp) { UNREACHABLE(); }
-
-#ifdef DEBUG
-  // Printing support.  Print nothing.
-  void Print() {}
-#endif
-
- private:
-  int pos_;
-};
-
-
-// Move a value to a location.
-class MoveInstr : public OneOperandInstruction {
- public:
-  MoveInstr(Location* loc, Value* value)
-      : OneOperandInstruction(loc, value) {
-  }
-
-  // Accessors.
-  Value* value() { return value_; }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  // Printing support.
-  void Print();
-#endif
-};
-
-
-// Load a property from a receiver, leaving the result in a location.
-class PropLoadInstr : public TwoOperandInstruction {
- public:
-  PropLoadInstr(Location* loc, Value* object, Value* key)
-      : TwoOperandInstruction(loc, object, key) {
-  }
-
-  // Accessors.
-  Value* object() { return value0_; }
-  Value* key() { return value1_; }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  void Print();
-#endif
-};
-
-
-// Perform a (non-short-circuited) binary operation on a pair of values,
-// leaving the result in a location.
-class BinaryOpInstr : public TwoOperandInstruction {
- public:
-  BinaryOpInstr(Location* loc, Token::Value op, Value* left, Value* right)
-      : TwoOperandInstruction(loc, left, right), op_(op) {
-  }
-
-  // Accessors.
-  Value* left() { return value0_; }
-  Value* right() { return value1_; }
-  Token::Value op() { return op_; }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  Token::Value op_;
-};
-
-
-// Return a value.  Has the side effect of moving its value into the return
-// value register.  Can only occur as the last instruction in an instruction
-// block, and implies that the block is closed (cannot have instructions
-// appended or graph fragments concatenated to the end) and that the block's
-// successor is the global exit node for the current function.
-class ReturnInstr : public OneOperandInstruction {
- public:
-  explicit ReturnInstr(Value* value)
-      : OneOperandInstruction(CfgGlobals::current()->nowhere(), value) {
-  }
-
-  virtual ~ReturnInstr() {}
-
-  // Accessors.
-  Value* value() { return value_; }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  void Print();
-#endif
-};
-
-
-// Nodes make up control-flow graphs.
-class CfgNode : public ZoneObject {
- public:
-  CfgNode() : is_marked_(false) {
-#ifdef DEBUG
-    number_ = -1;
-#endif
-  }
-
-  virtual ~CfgNode() {}
-
-  // Because CFGs contain cycles, nodes support marking during traversal
-  // (e.g., for printing or compilation).  The traversal functions will mark
-  // unmarked nodes and backtrack if they encounter a marked one.  After a
-  // traversal, the graph should be explicitly unmarked by calling Unmark on
-  // the entry node.
-  bool is_marked() { return is_marked_; }
-  virtual void Unmark() = 0;
-
-  // Predicates:
-
-  // True if the node is an instruction block.
-  virtual bool is_block() { return false; }
-
-  // Support for fast-compilation mode.  Emit the instructions or control
-  // flow represented by the node.
-  virtual void Compile(MacroAssembler* masm) = 0;
-
-#ifdef DEBUG
-  int number() {
-    if (number_ == -1) number_ = CfgGlobals::current()->next_node_number();
-    return number_;
-  }
-
-  virtual void Print() = 0;
-#endif
-
- protected:
-  bool is_marked_;
-
-#ifdef DEBUG
-  int number_;
-#endif
-};
-
-
-// A block is a single-entry, single-exit block of instructions.
-class InstructionBlock : public CfgNode {
- public:
-  InstructionBlock() : successor_(NULL), instructions_(4) {}
-
-  virtual ~InstructionBlock() {}
-
-  void Unmark();
-
-  // Cast accessor.
-  static InstructionBlock* cast(CfgNode* node) {
-    ASSERT(node->is_block());
-    return reinterpret_cast<InstructionBlock*>(node);
-  }
-
-  bool is_block() { return true; }
-
-  // Accessors.
-  CfgNode* successor() { return successor_; }
-
-  void set_successor(CfgNode* succ) {
-    ASSERT(successor_ == NULL);
-    successor_ = succ;
-  }
-
-  ZoneList<Instruction*>* instructions() { return &instructions_; }
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-  // Add an instruction to the end of the block.
-  void Append(Instruction* instr) { instructions_.Add(instr); }
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  CfgNode* successor_;
-  ZoneList<Instruction*> instructions_;
-};
-
-
-// An entry node (one per function).
-class EntryNode : public CfgNode {
- public:
-  explicit EntryNode(InstructionBlock* succ) : successor_(succ) {}
-
-  virtual ~EntryNode() {}
-
-  void Unmark();
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  InstructionBlock* successor_;
-};
-
-
-// An exit node (one per function).
-class ExitNode : public CfgNode {
- public:
-  ExitNode() {}
-
-  virtual ~ExitNode() {}
-
-  void Unmark();
-
-  // Support for fast-compilation mode.
-  void Compile(MacroAssembler* masm);
-
-#ifdef DEBUG
-  void Print();
-#endif
-};
-
-
-// A CFG consists of a linked structure of nodes.  Nodes are linked by
-// pointing to their successors, always beginning with a (single) entry node
-// (not necessarily of type EntryNode).  If it is still possible to add
-// nodes to the end of the graph (i.e., there is a (single) path that does
-// not end with the global exit node), then the CFG has an exit node as
-// well.
-//
-// The empty CFG is represented by a NULL entry and a NULL exit.
-//
-// We use the term 'open fragment' to mean a CFG whose entry and exits are
-// both instruction blocks.  It is always possible to add instructions and
-// nodes to the beginning or end of an open fragment.
-//
-// We use the term 'closed fragment' to mean a CFG whose entry is an
-// instruction block and whose exit is NULL (all paths go to the global
-// exit).
-//
-// We use the term 'fragment' to refer to a CFG that is known to be an open
-// or closed fragment.
-class Cfg : public ZoneObject {
- public:
-  // Create an empty CFG fragment.
-  Cfg() : entry_(NULL), exit_(NULL) {}
-
-  // Build the CFG for a function.  The returned CFG begins with an
-  // EntryNode and all paths end with the ExitNode.
-  static Cfg* Build();
-
-  // The entry and exit nodes of the CFG (not necessarily EntryNode and
-  // ExitNode).
-  CfgNode* entry() { return entry_; }
-  CfgNode* exit() { return exit_; }
-
-  // True if the CFG has no nodes.
-  bool is_empty() { return entry_ == NULL; }
-
-  // True if the CFG has an available exit node (i.e., it can be appended or
-  // concatenated to).
-  bool has_exit() { return exit_ != NULL; }
-
-  // Add an EntryNode to a CFG fragment.  It is no longer a fragment
-  // (instructions can no longer be prepended).
-  void PrependEntryNode();
-
-  // Append an instruction to the end of an open fragment.
-  void Append(Instruction* instr);
-
-  // Appends a return instruction to the end of an open fragment and makes
-  // it a closed fragment (the exit's successor becomes the global exit node).
-  void AppendReturnInstruction(Value* value);
-
-  // Glue another CFG fragment to the end of this (open) fragment.
-  void Concatenate(Cfg* other);
-
-  // Support for compilation.  Compile the entire CFG.
-  Handle<Code> Compile(Handle<Script> script);
-
-#ifdef DEBUG
-  // Support for printing.
-  void Print();
-#endif
-
- private:
-  // Entry and exit nodes.
-  CfgNode* entry_;
-  CfgNode* exit_;
-};
-
-
-// An implementation of a set of locations (currently slot locations); most
-// of the operations are destructive.
-class LocationSet BASE_EMBEDDED {
- public:
-  // Construct an empty location set.
-  LocationSet() : parameters_(0), locals_(0) {}
-
-  // Raw accessors.
-  uintptr_t parameters() { return parameters_; }
-  uintptr_t locals() { return locals_; }
-
-  // Make this the empty set.
-  void Empty() {
-    parameters_ = locals_ = 0;
-  }
-
-  // Insert an element.
-  void AddElement(SlotLocation* location) {
-    if (location->type() == Slot::PARAMETER) {
-      // Parameter indexes begin with -1 ('this').
-      ASSERT(location->index() < kBitsPerPointer - 1);
-      parameters_ |= (1 << (location->index() + 1));
-    } else {
-      ASSERT(location->type() == Slot::LOCAL);
-      ASSERT(location->index() < kBitsPerPointer);
-      locals_ |= (1 << location->index());
-    }
-  }
-
-  // (Destructively) compute the union with another set.
-  void Union(LocationSet* other) {
-    parameters_ |= other->parameters();
-    locals_ |= other->locals();
-  }
-
-  bool Contains(SlotLocation* location) {
-    if (location->type() == Slot::PARAMETER) {
-      ASSERT(location->index() < kBitsPerPointer - 1);
-      return (parameters_ & (1 << (location->index() + 1)));
-    } else {
-      ASSERT(location->type() == Slot::LOCAL);
-      ASSERT(location->index() < kBitsPerPointer);
-      return (locals_ & (1 << location->index()));
-    }
-  }
-
- private:
-  uintptr_t parameters_;
-  uintptr_t locals_;
-};
-
-
-// An ExpressionCfgBuilder traverses an expression and returns an open CFG
-// fragment (currently a possibly empty list of instructions represented by
-// a singleton instruction block) and the expression's value.
-//
-// Failure to build the CFG is indicated by a NULL CFG.
-class ExpressionCfgBuilder : public AstVisitor {
- public:
-  ExpressionCfgBuilder() : destination_(NULL), value_(NULL), graph_(NULL) {}
-
-  // Result accessors.
-  Value* value() { return value_; }
-  Cfg* graph() { return graph_; }
-  LocationSet* assigned_vars() { return &assigned_vars_; }
-
-  // Build the CFG for an expression and remember its value.  The
-  // destination is a 'hint' where the value should go which may be ignored.
-  // NULL is used to indicate no preference.
-  //
-  // Concretely, if the expression needs to generate a temporary for its
-  // value, it should use the passed destination or generate one if NULL.
-  void Build(Expression* expr, Location* destination) {
-    value_ = NULL;
-    graph_ = new Cfg();
-    assigned_vars_.Empty();
-    destination_ = destination;
-    Visit(expr);
-  }
-
-  // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
-  // State for the visitor.  Input parameter:
-  Location* destination_;
-
-  // Output parameters:
-  Value* value_;
-  Cfg* graph_;
-  LocationSet assigned_vars_;
-};
-
-
-// A StatementCfgBuilder maintains a CFG fragment accumulator.  When it
-// visits a statement, it concatenates the CFG for the statement to the end
-// of the accumulator.
-class StatementCfgBuilder : public AstVisitor {
- public:
-  StatementCfgBuilder() : graph_(new Cfg()) {}
-
-  Cfg* graph() { return graph_; }
-
-  void VisitStatements(ZoneList<Statement*>* stmts);
-
-  // AST node visitors.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
-  // State for the visitor.  Input/output parameter:
-  Cfg* graph_;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_CFG_H_
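Aside on the header deleted above: the 'open fragment' / 'closed fragment'
terminology reduces to one invariant, sketched here as standalone C++ with
hypothetical stand-in types (Node, Fragment), not the V8 classes:

#include <cassert>
#include <vector>

// Toy stand-ins for the deleted CfgNode/InstructionBlock classes.
struct Node {
  std::vector<int> instructions;  // Opaque instruction payloads.
  Node* successor = nullptr;
};

struct Fragment {
  Node* entry = nullptr;
  Node* exit = nullptr;  // nullptr means closed: all paths reach global exit.

  bool is_empty() const { return entry == nullptr; }
  bool has_exit() const { return exit != nullptr; }

  // Only open fragments accept appended instructions.
  void Append(int instr) {
    if (is_empty()) entry = exit = new Node();  // Leaked; fine for a sketch.
    assert(has_exit());
    exit->instructions.push_back(instr);
  }

  // Glue another fragment onto this open fragment; the result stays open
  // only if 'other' was open.
  void Concatenate(Fragment* other) {
    assert(has_exit() && !other->is_empty());
    exit->successor = other->entry;
    exit = other->exit;  // Becomes nullptr if 'other' was closed.
  }
};

A NULL result from Cfg::Build() is what the FLAG_multipass block removed
from compiler.cc below tested before falling back to the regular code
generator.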
diff --git a/src/codegen.cc b/src/codegen.cc
index 9a00ae2..a18fa0f 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -469,44 +469,32 @@
 }
 
 
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    int pos = fun->start_position();
-    if (pos != RelocInfo::kNoPosition) {
-      masm()->RecordStatementPosition(pos);
-      masm()->RecordPosition(pos);
-    }
+static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+  if (pos != RelocInfo::kNoPosition) {
+    cgen->masm()->RecordStatementPosition(pos);
+    cgen->masm()->RecordPosition(pos);
   }
 }
 
 
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+}
+
+
 void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    int pos = fun->end_position();
-    if (pos != RelocInfo::kNoPosition) {
-      masm()->RecordStatementPosition(pos);
-      masm()->RecordPosition(pos);
-    }
-  }
+  if (FLAG_debug_info) RecordPositions(this, fun->end_position());
 }
 
 
-void CodeGenerator::CodeForStatementPosition(AstNode* node) {
-  if (FLAG_debug_info) {
-    int pos = node->statement_pos();
-    if (pos != RelocInfo::kNoPosition) {
-      masm()->RecordStatementPosition(pos);
-      masm()->RecordPosition(pos);
-    }
-  }
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
 }
 
 
 void CodeGenerator::CodeForSourcePosition(int pos) {
-  if (FLAG_debug_info) {
-    if (pos != RelocInfo::kNoPosition) {
-      masm()->RecordPosition(pos);
-    }
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm()->RecordPosition(pos);
   }
 }
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 15f6479..6ba7a9a 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -28,7 +28,6 @@
 #include "v8.h"
 
 #include "bootstrapper.h"
-#include "cfg.h"
 #include "codegen-inl.h"
 #include "compilation-cache.h"
 #include "compiler.h"
@@ -79,22 +78,6 @@
     return Handle<Code>::null();
   }
 
-  if (FLAG_multipass) {
-    CfgGlobals scope(literal);
-    Cfg* cfg = Cfg::Build();
-#ifdef DEBUG
-    if (FLAG_print_cfg && cfg != NULL) {
-      SmartPointer<char> name = literal->name()->ToCString();
-      PrintF("Function \"%s\":\n", *name);
-      cfg->Print();
-      PrintF("\n");
-    }
-#endif
-    if (cfg != NULL) {
-      return cfg->Compile(script);
-    }
-  }
-
   // Generate code and return it.
   Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
   return result;
@@ -121,8 +104,6 @@
                                        ScriptDataImpl* pre_data) {
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
 
-  // Make sure we have an initial stack limit.
-  StackGuard guard;
   PostponeInterruptsScope postpone;
 
   ASSERT(!i::Top::global_context().is_null());
@@ -351,8 +332,6 @@
   // The VM is in the COMPILER state until exiting this function.
   VMState state(COMPILER);
 
-  // Make sure we have an initial stack limit.
-  StackGuard guard;
   PostponeInterruptsScope postpone;
 
   // Compute name, source code and script data.
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 3dba53a..9d5cace 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -65,6 +65,7 @@
   // Accept connections on the bound port.
   while (!terminate_) {
     bool ok = server_->Listen(1);
+    listening_->Signal();
     if (ok) {
       // Accept the new connection.
       Socket* client = server_->Accept();
@@ -93,6 +94,10 @@
 }
 
 
+void DebuggerAgent::WaitUntilListening() {
+  listening_->Wait();
+}
+
 void DebuggerAgent::CreateSession(Socket* client) {
   ScopedLock with(session_access_);
 
diff --git a/src/debug-agent.h b/src/debug-agent.h
index 04f883f..3647994 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -47,7 +47,8 @@
       : name_(StrDup(name)), port_(port),
         server_(OS::CreateSocket()), terminate_(false),
         session_access_(OS::CreateMutex()), session_(NULL),
-        terminate_now_(OS::CreateSemaphore(0)) {
+        terminate_now_(OS::CreateSemaphore(0)),
+        listening_(OS::CreateSemaphore(0)) {
     ASSERT(instance_ == NULL);
     instance_ = this;
   }
@@ -57,6 +58,7 @@
   }
 
   void Shutdown();
+  void WaitUntilListening();
 
  private:
   void Run();
@@ -72,6 +74,7 @@
   Mutex* session_access_;  // Mutex guarding access to session_.
   DebuggerAgentSession* session_;  // Current active session if any.
   Semaphore* terminate_now_;  // Semaphore to signal termination.
+  Semaphore* listening_;
 
   static DebuggerAgent* instance_;
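
The new listening_ semaphore lets the embedder block until the agent's
socket is actually accepting connections, instead of racing the agent
thread. A minimal sketch of the same handshake using standard C++
primitives (all names here are illustrative, not the V8 API):

#include <condition_variable>
#include <mutex>

class Agent {
 public:
  void Run() {                 // Runs on the agent thread.
    Listen();                  // Bind and listen on the debugger port.
    {
      std::lock_guard<std::mutex> lock(mutex_);
      listening_ = true;
    }
    cv_.notify_all();          // Counterpart of listening_->Signal().
    // ... accept and serve connections ...
  }

  void WaitUntilListening() {  // Called from the embedder's thread.
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return listening_; });
  }

 private:
  void Listen() {}             // Placeholder for the socket setup.
  std::mutex mutex_;
  std::condition_variable cv_;
  bool listening_ = false;
};

V8 gets the same effect with a counting semaphore created at zero: Run()
signals it after each Listen() call, and WaitUntilListening() waits once.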
 
diff --git a/src/debug-delay.js b/src/debug-delay.js
index ce70c75..cb789be 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// jsminify this file, js2c: jsmin
-
 // Default number of frames to include in the response to backtrace request.
 const kDefaultBacktraceLength = 10;
 
@@ -35,7 +33,7 @@
 // Regular expression to skip "crud" at the beginning of a source line which is
 // not really code. Currently the regular expression matches whitespace and
 // comments.
-const sourceLineBeginningSkip = /^(?:[ \v\h]*(?:\/\*.*?\*\/)*)*/;
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
 
 // Debug events which can occur in the V8 JavaScript engine. These originate
 // from the API include file debug.h.
@@ -350,7 +348,7 @@
     if (!script.sourceColumnStart_) {
       script.sourceColumnStart_ = new Array(script.lineCount());
     }
-    
+
     // Fill cache if needed and get column where the actual source starts.
     if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
       script.sourceColumnStart_[line] =
@@ -361,11 +359,11 @@
 
   // Convert the line and column into an absolute position within the script.
   var pos = Debug.findScriptSourcePosition(script, this.line(), column);
-  
+
   // If the position is not found in the script (the script might be shorter
   // than it used to be) just ignore it.
   if (pos === null) return;
-  
+
   // Create a break point object and set the break point.
   break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
   break_point.setIgnoreCount(this.ignoreCount());
@@ -492,7 +490,7 @@
 // Returns the character position in a script based on a line number and an
 // optional position within that line.
 Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
-  var location = script.locationFromLine(opt_line, opt_column);  
+  var location = script.locationFromLine(opt_line, opt_column);
   return location ? location.position : null;
 }
 
@@ -944,7 +942,7 @@
   o.body = { uncaught: this.uncaught_,
              exception: MakeMirror(this.exception_)
            };
-           
+
   // Exceptions might happen without any JavaScript frames.
   if (this.exec_state_.frameCount() > 0) {
     o.body.sourceLine = this.sourceLine();
@@ -1097,7 +1095,7 @@
 function ProtocolMessage(request) {
   // Update sequence number.
   this.seq = next_response_seq++;
-  
+
   if (request) {
     // If message is based on a request this is a response. Fill the initial
     // response from the request.
@@ -1487,7 +1485,7 @@
     response.failed('Missing argument "groupId"');
     return;
   }
-  
+
   var cleared_break_points = [];
   var new_script_break_points = [];
   for (var i = 0; i < script_break_points.length; i++) {
@@ -1603,7 +1601,7 @@
     if (index < 0 || this.exec_state_.frameCount() <= index) {
       return response.failed('Invalid frame number');
     }
-    
+
     this.exec_state_.setSelectedFrame(request.arguments.number);
   }
   response.body = this.exec_state_.frame();
@@ -1633,7 +1631,7 @@
 
   // Get the frame for which the scopes are requested.
   var frame = this.frameForScopeRequest_(request);
-  
+
   // Fill all scopes for this frame.
   var total_scopes = frame.scopeCount();
   var scopes = [];
@@ -1750,7 +1748,7 @@
     includeSource = %ToBoolean(request.arguments.includeSource);
     response.setOption('includeSource', includeSource);
   }
-  
+
   // Lookup handles.
   var mirrors = {};
   for (var i = 0; i < handles.length; i++) {
diff --git a/src/debug.cc b/src/debug.cc
index 3c2bfa8..ec658d6 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2498,6 +2498,11 @@
 }
 
 
+void Debugger::WaitForAgent() {
+  if (agent_ != NULL)
+    agent_->WaitUntilListening();
+}
+
 MessageImpl MessageImpl::NewEvent(DebugEvent event,
                                   bool running,
                                   Handle<JSObject> exec_state,
diff --git a/src/debug.h b/src/debug.h
index d6b2c08..29c2bc2 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -354,6 +354,7 @@
   static char* ArchiveDebug(char* to);
   static char* RestoreDebug(char* from);
   static int ArchiveSpacePerThread();
+  static void FreeThreadResources() { }
 
   // Mirror cache handling.
   static void ClearMirrorCache();
@@ -645,6 +646,9 @@
   // Stop the debugger agent.
   static void StopAgent();
 
+  // Blocks until the agent has started listening for connections.
+  static void WaitForAgent();
+
   // Unload the debugger if possible. Only called when no debugger is currently
   // active.
   static void UnloadDebugger();
diff --git a/src/execution.cc b/src/execution.cc
index 04ec905..8bc6b74 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -61,9 +61,6 @@
   // Entering JavaScript.
   VMState state(JS);
 
-  // Guard the stack against too much recursion.
-  StackGuard guard;
-
   // Placeholder for return value.
   Object* value = reinterpret_cast<Object*>(kZapValue);
 
@@ -217,55 +214,6 @@
 StackGuard::ThreadLocal StackGuard::thread_local_;
 
 
-StackGuard::StackGuard() {
-  // NOTE: Overall the StackGuard code assumes that the stack grows towards
-  // lower addresses.
-  ExecutionAccess access;
-  if (thread_local_.nesting_++ == 0) {
-    // Initial StackGuard is being set. We will set the stack limits based on
-    // the current stack pointer allowing the stack to grow kLimitSize from
-    // here.
-
-    // Ensure that either the stack limits are unset (kIllegalLimit) or that
-    // they indicate a pending interruption. The interrupt limit will be
-    // temporarily reset through the code below and reestablished if the
-    // interrupt flags indicate that an interrupt is pending.
-    ASSERT(thread_local_.jslimit_ == kIllegalLimit ||
-           (thread_local_.jslimit_ == kInterruptLimit &&
-            thread_local_.interrupt_flags_ != 0));
-    ASSERT(thread_local_.climit_ == kIllegalLimit ||
-           (thread_local_.climit_ == kInterruptLimit &&
-            thread_local_.interrupt_flags_ != 0));
-
-    uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
-    thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
-    Heap::SetStackLimit(limit);
-    // NOTE: The check for overflow is not safe as there is no guarantee that
-    // the running thread has its stack in all memory up to address 0x00000000.
-    thread_local_.initial_climit_ = thread_local_.climit_ =
-        reinterpret_cast<uintptr_t>(this) >= kLimitSize ?
-            reinterpret_cast<uintptr_t>(this) - kLimitSize : 0;
-
-    if (thread_local_.interrupt_flags_ != 0) {
-      set_limits(kInterruptLimit, access);
-    }
-  }
-  // Ensure that proper limits have been set.
-  ASSERT(thread_local_.jslimit_ != kIllegalLimit &&
-         thread_local_.climit_ != kIllegalLimit);
-  ASSERT(thread_local_.initial_jslimit_ != kIllegalLimit &&
-         thread_local_.initial_climit_ != kIllegalLimit);
-}
-
-
-StackGuard::~StackGuard() {
-  ExecutionAccess access;
-  if (--thread_local_.nesting_ == 0) {
-    set_limits(kIllegalLimit, access);
-  }
-}
-
-
 bool StackGuard::IsStackOverflow() {
   ExecutionAccess access;
   return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -285,15 +233,16 @@
   ExecutionAccess access;
   // If the current limits are special (e.g. due to a pending interrupt) then
   // leave them alone.
+  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
   if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
-    thread_local_.jslimit_ = limit;
-    Heap::SetStackLimit(limit);
+    thread_local_.jslimit_ = jslimit;
+    Heap::SetStackLimit(jslimit);
   }
   if (thread_local_.climit_ == thread_local_.initial_climit_) {
     thread_local_.climit_ = limit;
   }
   thread_local_.initial_climit_ = limit;
-  thread_local_.initial_jslimit_ = limit;
+  thread_local_.initial_jslimit_ = jslimit;
 }
 
 
@@ -407,6 +356,61 @@
 }
 
 
+static internal::Thread::LocalStorageKey stack_limit_key =
+    internal::Thread::CreateThreadLocalKey();
+
+
+void StackGuard::FreeThreadResources() {
+  Thread::SetThreadLocal(
+      stack_limit_key,
+      reinterpret_cast<void*>(thread_local_.initial_climit_));
+}
+
+
+void StackGuard::ThreadLocal::Clear() {
+  initial_jslimit_ = kIllegalLimit;
+  jslimit_ = kIllegalLimit;
+  initial_climit_ = kIllegalLimit;
+  climit_ = kIllegalLimit;
+  nesting_ = 0;
+  postpone_interrupts_nesting_ = 0;
+  interrupt_flags_ = 0;
+  Heap::SetStackLimit(kIllegalLimit);
+}
+
+
+void StackGuard::ThreadLocal::Initialize() {
+  if (initial_climit_ == kIllegalLimit) {
+    // Takes the address of the limit variable in order to find out where
+    // the top of stack is right now.
+    intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+    initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    initial_climit_ = limit;
+    climit_ = limit;
+    Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+  }
+  nesting_ = 0;
+  postpone_interrupts_nesting_ = 0;
+  interrupt_flags_ = 0;
+}
+
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+  thread_local_.Clear();
+}
+
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+  thread_local_.Initialize();
+  void* stored_limit = Thread::GetThreadLocal(stack_limit_key);
+  // You should hold the ExecutionAccess lock when you call this.
+  if (stored_limit != NULL) {
+    StackGuard::SetStackLimit(reinterpret_cast<intptr_t>(stored_limit));
+  }
+}
+
+
 // --- C a l l s   t o   n a t i v e s ---
 
 #define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
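
ThreadLocal::Initialize above derives the limits from the address of one of
its own local variables, relying on the stack growing towards lower
addresses (an assumption the deleted StackGuard constructor also noted). A
standalone sketch of that trick, with an illustrative allowance constant:

#include <cstdint>
#include <cstdio>

static const uintptr_t kLimitSize = 512 * 1024;  // Illustrative allowance.

uintptr_t ComputeStackLimit() {
  // The address of a local approximates the current top of the stack;
  // subtracting the allowance gives the lowest address the stack may reach.
  uintptr_t top = reinterpret_cast<uintptr_t>(&top);
  return top - kLimitSize;
}

int main() {
  std::printf("stack limit: %p\n",
              reinterpret_cast<void*>(ComputeStackLimit()));
  return 0;
}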
diff --git a/src/execution.h b/src/execution.h
index 4cdfd2b..55307f7 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -141,14 +141,13 @@
 class ExecutionAccess;
 
 
-// Stack guards are used to limit the number of nested invocations of
-// JavaScript and the stack size used in each invocation.
-class StackGuard BASE_EMBEDDED {
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class StackGuard : public AllStatic {
  public:
-  StackGuard();
-
-  ~StackGuard();
-
+  // Pass the address beyond which the stack should not grow.  The stack
+  // is assumed to grow downwards.
   static void SetStackLimit(uintptr_t limit);
 
   static Address address_of_jslimit() {
@@ -159,6 +158,13 @@
   static char* ArchiveStackGuard(char* to);
   static char* RestoreStackGuard(char* from);
   static int ArchiveSpacePerThread();
+  static void FreeThreadResources();
+  // Sets up the default stack guard for this thread if it has not
+  // already been set up.
+  static void InitThread(const ExecutionAccess& lock);
+  // Clears the stack guard for this thread so it does not look as if
+  // it has been set up.
+  static void ClearThread(const ExecutionAccess& lock);
 
   static bool IsStackOverflow();
   static bool IsPreempted();
@@ -175,6 +181,13 @@
 #endif
   static void Continue(InterruptFlag after_what);
 
+  // This provides an asynchronous read of the stack limit for the current
+  // thread.  There are no locks protecting this, but it is assumed that you
+  // have the global V8 lock if you are using multiple V8 threads.
+  static uintptr_t climit() {
+    return thread_local_.climit_;
+  }
+
   static uintptr_t jslimit() {
     return thread_local_.jslimit_;
   }
@@ -183,13 +196,6 @@
   // You should hold the ExecutionAccess lock when calling this method.
   static bool IsSet(const ExecutionAccess& lock);
 
-  // This provides an asynchronous read of the stack limit for the current
-  // thread.  There are no locks protecting this, but it is assumed that you
-  // have the global V8 lock if you are using multiple V8 threads.
-  static uintptr_t climit() {
-    return thread_local_.climit_;
-  }
-
   // You should hold the ExecutionAccess lock when calling this method.
   static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
     Heap::SetStackLimit(value);
@@ -200,14 +206,9 @@
   // Reset limits to initial values. For example after handling interrupt.
   // You should hold the ExecutionAccess lock when calling this method.
   static void reset_limits(const ExecutionAccess& lock) {
-    if (thread_local_.nesting_ == 0) {
-      // No limits have been set yet.
-      set_limits(kIllegalLimit, lock);
-    } else {
-      thread_local_.jslimit_ = thread_local_.initial_jslimit_;
-      Heap::SetStackLimit(thread_local_.jslimit_);
-      thread_local_.climit_ = thread_local_.initial_climit_;
-    }
+    thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+    Heap::SetStackLimit(thread_local_.jslimit_);
+    thread_local_.climit_ = thread_local_.initial_climit_;
   }
 
   // Enable or disable interrupts.
@@ -217,24 +218,19 @@
   static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
 #ifdef V8_TARGET_ARCH_X64
   static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
-  static const uintptr_t kIllegalLimit = V8_UINT64_C(0xffffffffffffffff);
+  static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
 #else
   static const uintptr_t kInterruptLimit = 0xfffffffe;
-  static const uintptr_t kIllegalLimit = 0xffffffff;
+  static const uintptr_t kIllegalLimit = 0xfffffff8;
 #endif
 
   class ThreadLocal {
    public:
-    ThreadLocal()
-      : initial_jslimit_(kIllegalLimit),
-        jslimit_(kIllegalLimit),
-        initial_climit_(kIllegalLimit),
-        climit_(kIllegalLimit),
-        nesting_(0),
-        postpone_interrupts_nesting_(0),
-        interrupt_flags_(0) {
-      Heap::SetStackLimit(kIllegalLimit);
-    }
+    ThreadLocal() { Clear(); }
+    // You should hold the ExecutionAccess lock when you call Initialize or
+    // Clear.
+    void Initialize();
+    void Clear();
     uintptr_t initial_jslimit_;
     uintptr_t jslimit_;
     uintptr_t initial_climit_;
diff --git a/src/factory.cc b/src/factory.cc
index d91b266..622055c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -673,6 +673,11 @@
 }
 
 
+Handle<String> Factory::NumberToString(Handle<Object> number) {
+  CALL_HEAP_FUNCTION(Heap::NumberToString(*number), String);
+}
+
+
 Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
     Handle<NumberDictionary> dictionary,
     uint32_t key,
diff --git a/src/factory.h b/src/factory.h
index ddf71de..0596fbf 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -286,6 +286,8 @@
       Handle<Object> value,
       PropertyAttributes attributes);
 
+  static Handle<String> NumberToString(Handle<Object> number);
+
   enum ApiInstanceType {
     JavaScriptObject,
     InnerGlobalObject,
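
The new Factory::NumberToString is a thin handle-producing wrapper over
Heap::NumberToString via CALL_HEAP_FUNCTION. The sketch below shows the
general shape such a wrapper takes (raw allocation, retry after collecting
garbage on failure, result wrapped for GC safety); it is illustrative
pseudologic, not the macro's actual expansion:

#include <functional>
#include <stdexcept>

template <typename T>
struct Handle {  // Greatly simplified stand-in for V8's Handle<T>.
  T* raw;
};

template <typename T>
Handle<T> CallHeapFunction(const std::function<T*()>& allocate,
                           const std::function<void()>& collect_garbage) {
  T* result = allocate();       // May fail under memory pressure.
  if (result == nullptr) {      // nullptr stands in for V8's Failure objects.
    collect_garbage();          // Try to free some space...
    result = allocate();        // ...and retry.
    if (result == nullptr) throw std::runtime_error("allocation failed");
  }
  return Handle<T>{result};
}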
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index c05feb4..91c5bca 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -133,7 +133,6 @@
 DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
            "Minimum length for automatic enable preparsing")
-DEFINE_bool(multipass, false, "use the multipass code generator")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -271,7 +270,6 @@
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
 DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_cfg, false, "print control-flow graph")
 
 // contexts.cc
 DEFINE_bool(trace_contexts, false, "trace contexts operations")
diff --git a/src/handles.cc b/src/handles.cc
index 931e3b9..b43ec53 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -29,6 +29,7 @@
 
 #include "accessors.h"
 #include "api.h"
+#include "arguments.h"
 #include "bootstrapper.h"
 #include "compiler.h"
 #include "debug.h"
@@ -46,10 +47,10 @@
 
 
 int HandleScope::NumberOfHandles() {
-  int n = HandleScopeImplementer::instance()->Blocks()->length();
+  int n = HandleScopeImplementer::instance()->blocks()->length();
   if (n == 0) return 0;
   return ((n - 1) * kHandleBlockSize) +
-      (current_.next - HandleScopeImplementer::instance()->Blocks()->last());
+      (current_.next - HandleScopeImplementer::instance()->blocks()->last());
 }
 
 
@@ -67,8 +68,8 @@
   HandleScopeImplementer* impl = HandleScopeImplementer::instance();
   // If there's more room in the last block, we use that. This is used
   // for fast creation of scopes after scope barriers.
-  if (!impl->Blocks()->is_empty()) {
-    Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
+  if (!impl->blocks()->is_empty()) {
+    Object** limit = &impl->blocks()->last()[kHandleBlockSize];
     if (current_.limit != limit) {
       current_.limit = limit;
     }
@@ -81,7 +82,7 @@
     result = impl->GetSpareOrNewBlock();
     // Add the extension to the global list of blocks, but count the
     // extension as part of the current scope.
-    impl->Blocks()->Add(result);
+    impl->blocks()->Add(result);
     current_.extensions++;
     current_.limit = &result[kHandleBlockSize];
   }
@@ -479,15 +480,17 @@
 }
 
 
+void CustomArguments::IterateInstance(ObjectVisitor* v) {
+  v->VisitPointers(values_, values_ + 4);
+}
+
+
 // Compute the property keys from the interceptor.
 v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
                                                  Handle<JSObject> object) {
   Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
-  Handle<Object> data(interceptor->data());
-  v8::AccessorInfo info(
-    v8::Utils::ToLocal(receiver),
-    v8::Utils::ToLocal(data),
-    v8::Utils::ToLocal(object));
+  CustomArguments args(interceptor->data(), *receiver, *object);
+  v8::AccessorInfo info(args.end());
   v8::Handle<v8::Array> result;
   if (!interceptor->enumerator()->IsUndefined()) {
     v8::NamedPropertyEnumerator enum_fun =
@@ -507,11 +510,8 @@
 v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
                                                    Handle<JSObject> object) {
   Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
-  Handle<Object> data(interceptor->data());
-  v8::AccessorInfo info(
-    v8::Utils::ToLocal(receiver),
-    v8::Utils::ToLocal(data),
-    v8::Utils::ToLocal(object));
+  CustomArguments args(interceptor->data(), *receiver, *object);
+  v8::AccessorInfo info(args.end());
   v8::Handle<v8::Array> result;
   if (!interceptor->enumerator()->IsUndefined()) {
     v8::IndexedPropertyEnumerator enum_fun =
diff --git a/src/heap.cc b/src/heap.cc
index 1a80d64..dcc25a3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -77,14 +77,17 @@
 int Heap::semispace_size_  = 512*KB;
 int Heap::old_generation_size_ = 128*MB;
 int Heap::initial_semispace_size_ = 128*KB;
+size_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
 int Heap::semispace_size_  = 16*MB;
 int Heap::old_generation_size_ = 1*GB;
 int Heap::initial_semispace_size_ = 1*MB;
+size_t Heap::code_range_size_ = 256*MB;
 #else
 int Heap::semispace_size_  = 8*MB;
 int Heap::old_generation_size_ = 512*MB;
 int Heap::initial_semispace_size_ = 512*KB;
+size_t Heap::code_range_size_ = 0;
 #endif
 
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
@@ -497,8 +500,8 @@
     DisableAssertNoAllocation allow_allocation;
     GlobalHandles::PostGarbageCollectionProcessing();
   }
-  // Update flat string readers.
-  FlatStringReader::PostGarbageCollectionProcessing();
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing();
 }
 
 
@@ -1250,6 +1253,10 @@
   // spaces.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
   Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
 
@@ -1261,7 +1268,8 @@
 
 Object* Heap::AllocateHeapNumber(double value) {
   // Use general version, if we're forced to always allocate.
-  if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
+  if (always_allocate()) return AllocateHeapNumber(value, TENURED);
+
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
@@ -1582,6 +1590,31 @@
 }
 
 
+Object* Heap::NumberToString(Object* number) {
+  Object* cached = GetNumberStringCache(number);
+  if (cached != undefined_value()) {
+    return cached;
+  }
+
+  char arr[100];
+  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  const char* str;
+  if (number->IsSmi()) {
+    int num = Smi::cast(number)->value();
+    str = IntToCString(num, buffer);
+  } else {
+    double num = HeapNumber::cast(number)->value();
+    str = DoubleToCString(num, buffer);
+  }
+  Object* result = AllocateStringFromAscii(CStrVector(str));
+
+  if (!result->IsFailure()) {
+    SetNumberStringCache(number, String::cast(result));
+  }
+  return result;
+}
+
+
 Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
   return SmiOrNumberFromDouble(value,
                                true /* number object must be new */,
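
Heap::NumberToString above is a cache-then-format lookup: consult the
number-string cache, format into a stack buffer on a miss, and remember the
result. The same shape in a freestanding sketch (a toy map-based cache; the
real cache is a fixed-size table, and Smis and heap numbers go through
different formatting helpers):

#include <cstdio>
#include <string>
#include <unordered_map>

std::string NumberToString(double number) {
  static std::unordered_map<double, std::string> cache;
  auto it = cache.find(number);
  if (it != cache.end()) return it->second;  // Cache hit: reuse the string.

  char buffer[100];                         // Mirrors the 100-char arr above.
  std::snprintf(buffer, sizeof(buffer), "%g", number);
  return cache.emplace(number, buffer).first->second;
}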
@@ -1862,6 +1895,9 @@
   AllocationSpace space =
       size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
 
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = LO_SPACE;
+
   Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
 
   if (result->IsFailure()) return result;
@@ -1889,6 +1925,9 @@
                                  PretenureFlag pretenure) {
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
   Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
 
   if (result->IsFailure()) return result;
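
The 'new space can't cope with forced allocation' guard now appears in each
allocator above: when always_allocate() holds, allocation is being forced at
a point where a garbage collection must not run, and new space is reclaimed
only by scavenging, so requests fall back to a tenured space. The policy,
reduced to a schematic sketch (not V8 code):

enum AllocationSpace { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };

AllocationSpace ChooseSpace(bool pretenured, bool always_allocate) {
  AllocationSpace space = pretenured ? OLD_DATA_SPACE : NEW_SPACE;
  // Old space can be grown on demand, so it tolerates allocations that
  // must succeed without first starting a collection.
  if (always_allocate) space = OLD_DATA_SPACE;
  return space;
}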
@@ -1923,6 +1962,7 @@
   // Initialize the object
   HeapObject::cast(result)->set_map(code_map());
   Code* code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_size(desc.reloc_size);
   code->set_sinfo_size(sinfo_size);
@@ -1967,6 +2007,7 @@
             obj_size);
   // Relocate the copy.
   Code* new_code = Code::cast(result);
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
   new_code->Relocate(new_addr - old_addr);
   return new_code;
 }
@@ -2532,13 +2573,17 @@
 
 Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
   int size = SeqAsciiString::SizeFor(length);
 
   Object* result = Failure::OutOfMemoryException();
   if (space == NEW_SPACE) {
     result = size <= kMaxObjectSizeInNewSpace
         ? new_space_.AllocateRaw(size)
-        : lo_space_->AllocateRawFixedArray(size);
+        : lo_space_->AllocateRaw(size);
   } else {
     if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
     result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2565,13 +2610,17 @@
 
 Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
   int size = SeqTwoByteString::SizeFor(length);
 
   Object* result = Failure::OutOfMemoryException();
   if (space == NEW_SPACE) {
     result = size <= kMaxObjectSizeInNewSpace
         ? new_space_.AllocateRaw(size)
-        : lo_space_->AllocateRawFixedArray(size);
+        : lo_space_->AllocateRaw(size);
   } else {
     if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
     result = AllocateRaw(size, space, OLD_DATA_SPACE);
@@ -2609,7 +2658,7 @@
 
 Object* Heap::AllocateRawFixedArray(int length) {
   // Use the general function if we're forced to always allocate.
-  if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
+  if (always_allocate()) return AllocateFixedArray(length, TENURED);
   // Allocate the raw data for a fixed array.
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
@@ -2662,6 +2711,9 @@
   ASSERT(empty_fixed_array()->IsFixedArray());
   if (length == 0) return empty_fixed_array();
 
+  // New space can't cope with forced allocation.
+  if (always_allocate()) pretenure = TENURED;
+
   int size = FixedArray::SizeFor(length);
   Object* result = Failure::OutOfMemoryException();
   if (pretenure != TENURED) {
@@ -3088,6 +3140,8 @@
   SYNCHRONIZE_TAG("bootstrapper");
   Top::Iterate(v);
   SYNCHRONIZE_TAG("top");
+  Relocatable::Iterate(v);
+  SYNCHRONIZE_TAG("relocatable");
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::Iterate(v);
@@ -3212,6 +3266,14 @@
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
+  // On 64-bit platform(s), we put all code objects in a 2 GB range of
+  // virtual address space, so that they can call each other with near calls.
+  if (code_range_size_ > 0) {
+    if (!CodeRange::Setup(code_range_size_)) {
+      return false;
+    }
+  }
+
   code_space_ =
       new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
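
The point of CodeRange::Setup on x64: if every code object lives inside one
contiguous 2 GB reservation, any two code addresses are within a 32-bit
displacement of each other, so calls between code objects can be emitted as
near (rel32) calls. A POSIX-only sketch of reserving such a range without
committing memory (illustrative, not V8's CodeRange implementation):

#include <sys/mman.h>
#include <cstddef>

void* ReserveCodeRange(size_t size) {
  // PROT_NONE reserves address space only; pages are committed later,
  // piecewise, as the code space grows inside the range.
  void* base = mmap(nullptr, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return base == MAP_FAILED ? nullptr : base;
}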
diff --git a/src/heap.h b/src/heap.h
index 92602c8..e878efc 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -882,11 +882,14 @@
     kRootListLength
   };
 
+  static Object* NumberToString(Object* number);
+
  private:
   static int semispace_size_;
   static int initial_semispace_size_;
   static int young_generation_size_;
   static int old_generation_size_;
+  static size_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 9a5352b..1de20f4 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -85,19 +85,25 @@
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return *reinterpret_cast<Object**>(pc_);
+  return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(pc_);
 }
 
 
 Object** RelocInfo::target_object_address() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(pc_);
+  return &Memory::Object_at(pc_);
 }
 
 
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  *reinterpret_cast<Object**>(pc_) = target;
+  Memory::Object_at(pc_) = target;
 }
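
The switch to Memory::Object_at replaces repeated reinterpret_casts with a
single typed accessor that serves reads, writes, and address-taking alike.
A generic sketch of the pattern (the type names are stand-ins):

#include <cstdint>

typedef uint8_t byte;
typedef byte* Address;

class Object;  // Opaque here, as in this sketch's context.

// Returning a reference lets one helper cover all three uses:
//   read:          Object* o = TypedMemoryAt<Object*>(pc);
//   write:         TypedMemoryAt<Object*>(pc) = o;
//   take address:  Object** slot = &TypedMemoryAt<Object*>(pc);
template <typename T>
inline T& TypedMemoryAt(Address addr) {
  return *reinterpret_cast<T*>(addr);
}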
 
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index e7712df..ad44026 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -42,10 +42,10 @@
   __ mov(Operand::StaticVariable(passed), edi);
 
   // The actual argument count has already been loaded into register
-  // eax, but JumpToBuiltin expects eax to contain the number of
+  // eax, but JumpToRuntime expects eax to contain the number of
   // arguments including the receiver.
   __ inc(eax);
-  __ JumpToBuiltin(ExternalReference(id));
+  __ JumpToRuntime(ExternalReference(id));
 }
 
 
@@ -129,12 +129,7 @@
     // eax: initial map
     __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
     __ shl(edi, kPointerSizeLog2);
-    __ AllocateObjectInNewSpace(edi,
-                                ebx,
-                                edi,
-                                no_reg,
-                                &rt_call,
-                                NO_ALLOCATION_FLAGS);
+    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
     // Allocated the JSObject, now initialize the fields.
     // eax: initial map
     // ebx: JSObject
@@ -189,14 +184,14 @@
     // ebx: JSObject
     // edi: start of next object (will be start of FixedArray)
     // edx: number of elements in properties array
-    __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
-                                times_pointer_size,
-                                edx,
-                                edi,
-                                ecx,
-                                no_reg,
-                                &undo_allocation,
-                                RESULT_CONTAINS_TOP);
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          edx,
+                          edi,
+                          ecx,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
     // Initialize the FixedArray.
     // ebx: JSObject
@@ -674,18 +669,18 @@
 
 
 // Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter holes is larger than zero an elements backing
-// store is allocated with this size and filled with the hole values. Otherwise
-// the elements backing store is set to the empty FixedArray.
+// register. If the parameter initial_capacity is larger than zero, an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
 static void AllocateEmptyJSArray(MacroAssembler* masm,
                                  Register array_function,
                                  Register result,
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
-                                 int holes,
+                                 int initial_capacity,
                                  Label* gc_required) {
-  ASSERT(holes >= 0);
+  ASSERT(initial_capacity >= 0);
 
   // Load the initial map from the array function.
   __ mov(scratch1, FieldOperand(array_function,
@@ -694,15 +689,15 @@
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
   int size = JSArray::kSize;
-  if (holes > 0) {
-    size += FixedArray::SizeFor(holes);
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
   }
-  __ AllocateObjectInNewSpace(size,
-                              result,
-                              scratch2,
-                              scratch3,
-                              gc_required,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
 
   // Allocated the JSArray. Now initialize the fields except for the elements
   // array.
@@ -717,7 +712,7 @@
 
   // If no storage is requested for the elements array just set the empty
   // fixed array.
-  if (holes == 0) {
+  if (initial_capacity == 0) {
     __ mov(FieldOperand(result, JSArray::kElementsOffset),
            Factory::empty_fixed_array());
     return;
@@ -737,17 +732,18 @@
   // scratch2: start of next object
   __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
          Factory::fixed_array_map());
-  __ mov(FieldOperand(scratch1, Array::kLengthOffset), Immediate(holes));
+  __ mov(FieldOperand(scratch1, Array::kLengthOffset),
+         Immediate(initial_capacity));
 
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
   ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
-  if (holes <= kLoopUnfoldLimit) {
+  if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
     __ mov(scratch3, Factory::the_hole_value());
-    for (int i = 0; i < holes; i++) {
+    for (int i = 0; i < initial_capacity; i++) {
       __ mov(FieldOperand(scratch1,
                           FixedArray::kHeaderSize + i * kPointerSize),
              scratch3);
@@ -797,26 +793,26 @@
   // If an empty array is requested, allocate a small elements array anyway. This
   // keeps the code below free of special casing for the empty array.
   int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
-  __ AllocateObjectInNewSpace(size,
-                              result,
-                              elements_array_end,
-                              scratch,
-                              gc_required,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
   __ jmp(&allocated);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
   __ bind(&not_empty);
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ AllocateObjectInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                              times_half_pointer_size,  // array_size is a smi.
-                              array_size,
-                              result,
-                              elements_array_end,
-                              scratch,
-                              gc_required,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,  // array_size is a smi.
+                        array_size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
 
   // Allocated the JSArray. Now initialize the fields except for the elements
   // array.
diff --git a/src/ia32/cfg-ia32.cc b/src/ia32/cfg-ia32.cc
deleted file mode 100644
index 58985a5..0000000
--- a/src/ia32/cfg-ia32.cc
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-ia32.h"
-#include "macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmt(masm, "[ InstructionBlock");
-    for (int i = 0, len = instructions_.length(); i < len; i++) {
-      // If the location of the current instruction is a temp, then the
-      // instruction cannot be in tail position in the block.  Allocate the
-      // temp based on peeking ahead to the next instruction.
-      Instruction* instr = instructions_[i];
-      Location* loc = instr->location();
-      if (loc->is_temporary()) {
-        instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
-      }
-      instructions_[i]->Compile(masm);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Label deferred_enter, deferred_exit;
-  {
-    Comment cmnt(masm, "[ EntryNode");
-    __ push(ebp);
-    __ mov(ebp, esp);
-    __ push(esi);
-    __ push(edi);
-    int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
-    if (count > 0) {
-      __ Set(eax, Immediate(Factory::undefined_value()));
-      for (int i = 0; i < count; i++) {
-        __ push(eax);
-      }
-    }
-    if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
-    }
-    if (FLAG_check_stack) {
-      ExternalReference stack_limit =
-          ExternalReference::address_of_stack_guard_limit();
-      __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(below, &deferred_enter);
-      __ bind(&deferred_exit);
-    }
-  }
-  successor_->Compile(masm);
-  if (FLAG_check_stack) {
-    Comment cmnt(masm, "[ Deferred Stack Check");
-    __ bind(&deferred_enter);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-    __ jmp(&deferred_exit);
-  }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Comment cmnt(masm, "[ ExitNode");
-  if (FLAG_trace) {
-    __ push(eax);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  __ RecordJSReturn();
-  __ mov(esp, ebp);
-  __ pop(ebp);
-  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-  __ ret((count + 1) * kPointerSize);
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
-  // The key should not be on the stack---if it is a compiler-generated
-  // temporary it is in the accumulator.
-  ASSERT(!key()->is_on_stack());
-
-  Comment cmnt(masm, "[ Load from Property");
-  // If the key is known at compile-time we may be able to use a load IC.
-  bool is_keyed_load = true;
-  if (key()->is_constant()) {
-    // Still use the keyed load IC if the key can be parsed as an integer so
-    // we will get into the case that handles [] on string objects.
-    Handle<Object> key_val = Constant::cast(key())->handle();
-    uint32_t ignored;
-    if (key_val->IsSymbol() &&
-        !String::cast(*key_val)->AsArrayIndex(&ignored)) {
-      is_keyed_load = false;
-    }
-  }
-
-  if (!object()->is_on_stack()) object()->Push(masm);
-  // A test eax instruction after the call indicates to the IC code that it
-  // was inlined.  Ensure there is not one here.
-  if (is_keyed_load) {
-    key()->Push(masm);
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-    __ pop(ebx);  // Discard key.
-  } else {
-    key()->Get(masm, ecx);
-    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ call(ic, RelocInfo::CODE_TARGET);
-  }
-  __ pop(ebx);  // Discard receiver.
-  location()->Set(masm, eax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
-  // The right-hand value should not be on the stack---if it is a
-  // compiler-generated temporary it is in the accumulator.
-  ASSERT(!right()->is_on_stack());
-
-  Comment cmnt(masm, "[ BinaryOpInstr");
-  // We can overwrite one of the operands if it is a temporary.
-  OverwriteMode mode = NO_OVERWRITE;
-  if (left()->is_temporary()) {
-    mode = OVERWRITE_LEFT;
-  } else if (right()->is_temporary()) {
-    mode = OVERWRITE_RIGHT;
-  }
-
-  // Push both operands and call the specialized stub.
-  if (!left()->is_on_stack()) left()->Push(masm);
-  right()->Push(masm);
-  GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
-  __ CallStub(&stub);
-  location()->Set(masm, eax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
-  // The location should be 'Effect'.  As a side effect, move the value to
-  // the accumulator.
-  Comment cmnt(masm, "[ ReturnInstr");
-  value_->Get(masm, eax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
-  __ mov(reg, Immediate(handle_));
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
-  __ push(Immediate(handle_));
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
-  switch (loc->type()) {
-    case Slot::PARAMETER: {
-      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
-    }
-    case Slot::LOCAL: {
-      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      return Operand(ebp, kOffset - loc->index() * kPointerSize);
-    }
-    default:
-      UNREACHABLE();
-      return Operand(eax);
-  }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ mov(ToOperand(loc), Immediate(handle_));
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
-  __ mov(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
-  __ mov(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
-  __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
-  // We dispatch to the value because in some cases (temp or constant)
-  // we can use a single instruction.
-  value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  // The accumulator is not live across a MoveInstr.
-  __ mov(eax, ToOperand(this));
-  __ mov(ToOperand(loc), eax);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(eax)) __ mov(reg, eax);
-      break;
-    case STACK:
-      __ pop(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(eax)) __ mov(eax, reg);
-      break;
-    case STACK:
-      __ push(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ push(eax);
-      break;
-    case STACK:
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
-  switch (where_) {
-    case ACCUMULATOR:
-      value->Get(masm, eax);
-      break;
-    case STACK:
-      value->Push(masm);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ mov(ToOperand(loc), eax);
-      break;
-    case STACK:
-      __ pop(ToOperand(loc));
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index d9f6672..0e314b9 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -2305,7 +2305,6 @@
 
 void CodeGenerator::VisitDeclaration(Declaration* node) {
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -2544,10 +2543,12 @@
   masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
   DeleteFrame();
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Check that the size of the code used for returning matches what is
   // expected by the debugger.
   ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
             masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
 }
 
 
@@ -4333,7 +4334,6 @@
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatementPosition(node);
 
   { Reference target(this, node->target());
     if (target.is_illegal()) {
@@ -4415,8 +4415,6 @@
 
 void CodeGenerator::VisitThrow(Throw* node) {
   Comment cmnt(masm_, "[ Throw");
-  CodeForStatementPosition(node);
-
   Load(node->exception());
   Result result = frame_->CallRuntime(Runtime::kThrow, 1);
   frame_->Push(&result);
@@ -4433,12 +4431,10 @@
 void CodeGenerator::VisitCall(Call* node) {
   Comment cmnt(masm_, "[ Call");
 
+  Expression* function = node->expression();
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatementPosition(node);
-
   // Check if the function is a variable or a property.
-  Expression* function = node->expression();
   Variable* var = function->AsVariableProxy()->AsVariable();
   Property* property = function->AsProperty();
 
@@ -4451,7 +4447,63 @@
   // is resolved in cache misses (this also holds for megamorphic calls).
   // ------------------------------------------------------------------------
 
-  if (var != NULL && !var->is_this() && var->is_global()) {
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+
+    // Prepare the stack for the call to the resolved function.
+    Load(function);
+
+    // Allocate a frame slot for the receiver.
+    frame_->Push(Factory::undefined_value());
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval.
+    frame_->PushElementAt(arg_count + 1);
+    if (arg_count > 0) {
+      frame_->PushElementAt(arg_count);
+    } else {
+      frame_->Push(Factory::undefined_value());
+    }
+
+    // Resolve the call.
+    Result result =
+        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up the stack with the right values for the function and the
+    // receiver.  Use a scratch register to avoid destroying the result.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+    frame_->SetElementAt(arg_count + 1, &scratch);
+
+    // We can reuse the result register now.
+    frame_->Spill(result.reg());
+    __ mov(result.reg(),
+           FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+    frame_->SetElementAt(arg_count, &result);
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    result = frame_->CallStub(&call_function, arg_count + 1);
+
+    // Restore the context and overwrite the function on the stack with
+    // the result.
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
@@ -4591,7 +4643,6 @@
 
 void CodeGenerator::VisitCallNew(CallNew* node) {
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -4621,66 +4672,6 @@
 }
 
 
-void CodeGenerator::VisitCallEval(CallEval* node) {
-  Comment cmnt(masm_, "[ CallEval");
-
-  // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
-  // the function we need to call and the receiver of the call.
-  // Then we call the resolved function using the given arguments.
-
-  ZoneList<Expression*>* args = node->arguments();
-  Expression* function = node->expression();
-
-  CodeForStatementPosition(node);
-
-  // Prepare the stack for the call to the resolved function.
-  Load(function);
-
-  // Allocate a frame slot for the receiver.
-  frame_->Push(Factory::undefined_value());
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Prepare the stack for the call to ResolvePossiblyDirectEval.
-  frame_->PushElementAt(arg_count + 1);
-  if (arg_count > 0) {
-    frame_->PushElementAt(arg_count);
-  } else {
-    frame_->Push(Factory::undefined_value());
-  }
-
-  // Resolve the call.
-  Result result =
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
-  // Touch up the stack with the right values for the function and the
-  // receiver.  Use a scratch register to avoid destroying the result.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
-  frame_->SetElementAt(arg_count + 1, &scratch);
-
-  // We can reuse the result register now.
-  frame_->Spill(result.reg());
-  __ mov(result.reg(),
-         FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
-  frame_->SetElementAt(arg_count, &result);
-
-  // Call the function.
-  CodeForSourcePosition(node->position());
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop);
-  result = frame_->CallStub(&call_function, arg_count + 1);
-
-  // Restore the context and overwrite the function on the stack with
-  // the result.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &result);
-}
-
-
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Load(args->at(0));
@@ -6992,12 +6983,12 @@
                                              Register scratch2,
                                              Register result) {
   // Allocate heap number in new space.
-  __ AllocateObjectInNewSpace(HeapNumber::kSize,
-                              result,
-                              scratch1,
-                              scratch2,
-                              need_gc,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(HeapNumber::kSize,
+                        result,
+                        scratch1,
+                        scratch2,
+                        need_gc,
+                        TAG_OBJECT);
 
   // Set the map.
   __ mov(FieldOperand(result, HeapObject::kMapOffset),
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index afdbffe..142a5a1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -553,7 +553,7 @@
   // information.
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(AstNode* node);
+  void CodeForStatementPosition(Statement* stmt);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 8211ea9..f7369a8 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -424,15 +424,14 @@
   __ mov(edx, eax);  // Save the value.
   __ sar(eax, kSmiTagSize);  // Untag the value.
   {  // Clamp the value to [0..255].
-    Label done, check_255;
-    __ cmp(eax, 0);
-    __ j(greater_equal, &check_255);
-    __ mov(eax, Immediate(0));
-    __ jmp(&done);
-    __ bind(&check_255);
-    __ cmp(eax, 255);
-    __ j(less_equal, &done);
+    Label done, is_negative;
+    __ test(eax, Immediate(0xFFFFFF00));
+    __ j(zero, &done);
+    __ j(negative, &is_negative);
     __ mov(eax, Immediate(255));
+    __ jmp(&done);
+    __ bind(&is_negative);
+    __ xor_(eax, Operand(eax));  // Clear eax.
     __ bind(&done);
   }
   __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
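
Note: the rewritten clamp trades two compares for one test. A value in [0..255] has no bits set above the low byte, so a single test against 0xFFFFFF00 falls through for in-range values, and the sign flag then separates negatives (clamp to 0) from large values (clamp to 255). The same logic as a standalone C++ sketch:

    #include <cassert>
    #include <stdint.h>

    // Clamp an untagged value to [0..255] the way the generated code does:
    // one test against ~0xFF decides "already in range", and the sign
    // decides which boundary to clamp to.
    int32_t ClampToByte(int32_t value) {
      if ((static_cast<uint32_t>(value) & 0xFFFFFF00u) == 0) {
        return value;              // test eax, 0xFFFFFF00; j(zero, &done)
      }
      if (value < 0) return 0;     // j(negative, ...); xor_(eax, eax)
      return 255;                  // mov(eax, 255)
    }

    int main() {
      assert(ClampToByte(-7) == 0);
      assert(ClampToByte(42) == 42);
      assert(ClampToByte(300) == 255);
      return 0;
    }
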
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a8d7e44..a3b2149 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -664,12 +664,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
@@ -692,14 +692,14 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
-                                              ScaleFactor element_size,
-                                              Register element_count,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+                                        ScaleFactor element_size,
+                                        Register element_count,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
@@ -722,12 +722,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
@@ -903,11 +903,11 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   Set(eax, Immediate(num_arguments));
-  JumpToBuiltin(ext);
+  JumpToRuntime(ext);
 }
 
 
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
   // Set the entry point and jump to the C entry runtime stub.
   mov(ebx, Immediate(ext));
   CEntryStub ces(1);
@@ -1049,7 +1049,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
     unresolved_.Add(entry);
@@ -1068,7 +1067,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
     unresolved_.Add(entry);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 60ede8a..ed72c96 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -175,30 +175,30 @@
   // and result_end have not yet been tagged as heap objects. If
   // result_contains_top_on_entry is true, the content of result is known to be
   // the allocation top on entry (could be result_end from a previous call to
-  // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+  // AllocateInNewSpace). If result_contains_top_on_entry is true, scratch
   // should be no_reg as it is never used.
-  void AllocateObjectInNewSpace(int object_size,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
-  void AllocateObjectInNewSpace(int header_size,
-                                ScaleFactor element_size,
-                                Register element_count,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int header_size,
+                          ScaleFactor element_size,
+                          Register element_count,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
-  void AllocateObjectInNewSpace(Register object_size,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. Make sure that no pointers are left to the
@@ -254,14 +254,14 @@
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToBuiltin, but also takes care of passing the number
+  // Like JumpToRuntime, but also takes care of passing the number
   // of arguments.
   void TailCallRuntime(const ExternalReference& ext,
                        int num_arguments,
                        int result_size);
 
-  // Jump to the builtin routine.
-  void JumpToBuiltin(const ExternalReference& ext);
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& ext);
 
 
   // ---------------------------------------------------------------------------
@@ -321,8 +321,16 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register edi.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to jump or call
+  //    the builtin directly.  If the code object has not yet been created, it
+  //    returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
   // Activation support.
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 3bed268..8fa4287 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -28,21 +28,22 @@
 #ifndef V8_IA32_SIMULATOR_IA32_H_
 #define V8_IA32_SIMULATOR_IA32_H_
 
+#include "allocation.h"
 
 // Since there is no simulator for the ia32 architecture the only thing we can
 // do is to call the entry directly.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   entry(p0, p1, p2, p3, p4);
 
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (reinterpret_cast<uintptr_t>(this) >= limit ? \
-      reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ia32 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
 
 // Call the generated regexp code directly. The entry function pointer should
 // expect seven int/pointer sized arguments and return an int.
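
Note: since ia32 has no simulator, generated code runs directly on the C stack and the JS stack limit is just the C stack limit. On a simulated architecture the same hook would instead return a limit inside the simulator's own stack. A hedged sketch of both shapes; the SimulatedStack class and its constants are invented for illustration:

    #include <stdint.h>

    // ia32: generated code shares the C stack, so the two limits coincide.
    struct NativeStack {
      static uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; }
    };

    // A simulated architecture would ignore the C limit and return a limit
    // inside its own separately allocated stack (constants are made up).
    struct SimulatedStack {
      static uintptr_t JsLimitFromCLimit(uintptr_t /* c_limit */) {
        static char sim_stack[64 * 1024];
        // Leave headroom at the low end, since stacks grow downwards.
        return reinterpret_cast<uintptr_t>(sim_stack) + 4 * 1024;
      }
    };

    int main() {
      uintptr_t c_limit = 0x1000;
      return NativeStack::JsLimitFromCLimit(c_limit) == c_limit ? 0 : 1;
    }
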
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 58a3ce5..ca4e142 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1783,12 +1783,12 @@
   // ebx: initial map
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
   __ shl(ecx, kPointerSizeLog2);
-  __ AllocateObjectInNewSpace(ecx,
-                              edx,
-                              ecx,
-                              no_reg,
-                              &generic_stub_call,
-                              NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(ecx,
+                        edx,
+                        ecx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // The JSObject is allocated; now initialize the fields and add the heap tag.
   // ebx: initial map
diff --git a/src/list.h b/src/list.h
index dd7ea1c..25211d9 100644
--- a/src/list.h
+++ b/src/list.h
@@ -51,6 +51,13 @@
   INLINE(explicit List(int capacity)) { Initialize(capacity); }
   INLINE(~List()) { DeleteData(data_); }
 
+  // Deallocates memory used by the list and leaves the list in a consistent
+  // empty state.
+  void Free() {
+    DeleteData(data_);
+    Initialize(0);
+  }
+
   INLINE(void* operator new(size_t size)) { return P::New(size); }
   INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index dcb4b49..f327a0a 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -163,7 +163,7 @@
 
 void Log::Close() {
   if (Write == WriteToFile) {
-    fclose(output_handle_);
+    if (output_handle_ != NULL) fclose(output_handle_);
     output_handle_ = NULL;
   } else if (Write == WriteToMemory) {
     delete output_buffer_;
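
Note: the added null check makes Close() safe when the log file was never opened or Close() runs twice, since fclose(NULL) is undefined behavior. The guarded-close idiom in isolation:

    #include <cstdio>

    static FILE* output_handle_ = NULL;

    // Closing is idempotent: a handle that was never opened, or was
    // already closed, is skipped instead of being passed to fclose.
    static void CloseLog() {
      if (output_handle_ != NULL) std::fclose(output_handle_);
      output_handle_ = NULL;
    }

    int main() {
      CloseLog();  // never opened: safe no-op
      CloseLog();  // second call: still safe
      return 0;
    }
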
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 5631dec..63a6d6e 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -50,7 +50,7 @@
 };
 
 
-// Flags used for the AllocateObjectInNewSpace functions.
+// Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
   // No special flags.
   NO_ALLOCATION_FLAGS = 0,
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index cbd47a8..a20245c 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -282,8 +282,6 @@
            rinfo->IsCallInstruction());
     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
     MarkCompactCollector::MarkObject(code);
-    // When compacting we convert the call to a real object pointer.
-    if (IsCompacting()) rinfo->set_call_object(code);
   }
 
  private:
@@ -1383,6 +1381,14 @@
         reinterpret_cast<Code*>(target)->instruction_start());
   }
 
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(
+        reinterpret_cast<Code*>(target)->instruction_start());
+  }
+
  private:
   void UpdatePointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
diff --git a/src/memory.h b/src/memory.h
index c64699e..503492a 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -63,6 +63,10 @@
   static Object*& Object_at(Address addr)  {
     return *reinterpret_cast<Object**>(addr);
   }
+
+  static Handle<Object>& Object_Handle_at(Address addr)  {
+    return *reinterpret_cast<Handle<Object>*>(addr);
+  }
 };
 
 } }  // namespace v8::internal
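
Note: Object_Handle_at follows the established Memory idiom: reinterpret a raw Address as a reference to a typed slot, so one expression serves both reads and writes. A self-contained sketch of that idiom using a local buffer (addresses and types are illustrative):

    #include <cassert>
    #include <stdint.h>

    typedef uint8_t* Address;

    // The Memory idiom: an accessor returns a *reference* into raw
    // memory, so callers can read or write the slot through it.
    struct MemorySketch {
      static int32_t& int32_at(Address addr) {
        return *reinterpret_cast<int32_t*>(addr);
      }
    };

    int main() {
      int32_t buffer[2] = {0, 0};
      Address base = reinterpret_cast<Address>(buffer);
      MemorySketch::int32_at(base) = 42;           // write through the reference
      assert(MemorySketch::int32_at(base) == 42);  // read back through it
      assert(buffer[0] == 42);
      return 0;
    }
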
diff --git a/src/messages.js b/src/messages.js
index 6513067..2720792 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -32,6 +32,11 @@
 var kVowelSounds = 0;
 var kCapitalVowelSounds = 0;
 
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
 
 function GetInstanceName(cons) {
   if (cons.length == 0) {
@@ -565,11 +570,6 @@
 // ----------------------------------------------------------------------------
 // Error implementation
 
-// If this object gets passed to an error constructor the error will
-// get an accessor for .message that constructs a descriptive error
-// message on access.
-var kAddMessageAccessorsMarker = { };
-
 // Defines accessors for a property that is calculated the first time
 // the property is read.
 function DefineOneShotAccessor(obj, name, fun) {
@@ -781,14 +781,15 @@
   }
   for (var i = 0; i < frames.length; i++) {
     var frame = frames[i];
+    var line;
     try {
-      var line = FormatSourcePosition(frame);
+      line = FormatSourcePosition(frame);
     } catch (e) {
       try {
-        var line = "<error: " + e + ">";
+        line = "<error: " + e + ">";
       } catch (ee) {
         // Any code that reaches this point is seriously nasty!
-        var line = "<error>";
+        line = "<error>";
       }
     }
     lines.push("    at " + line);
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index ee3dd64..c4ab7b8 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// jsminify this file, js2c: jsmin
-
 // Touch the RegExp and Date functions to make sure that date-delay.js and
 // regexp-delay.js have been loaded. This is required as the mirrors use
 // functions within these files through the builtins object.
diff --git a/src/objects.cc b/src/objects.cc
index ea2c202..834589a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
+#include "arguments.h"
 #include "bootstrapper.h"
 #include "debug.h"
 #include "execution.h"
@@ -158,14 +159,12 @@
     Object* fun_obj = data->getter();
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope;
-    Handle<JSObject> self(JSObject::cast(receiver));
-    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    JSObject* self = JSObject::cast(receiver);
+    JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
-    Handle<Object> fun_data(data->data());
-    LOG(ApiNamedPropertyAccess("load", *self, name));
-    v8::AccessorInfo info(v8::Utils::ToLocal(self),
-                          v8::Utils::ToLocal(fun_data),
-                          v8::Utils::ToLocal(holder_handle));
+    LOG(ApiNamedPropertyAccess("load", self, name));
+    CustomArguments args(data->data(), self, holder_handle);
+    v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
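
Note: CustomArguments replaces three separate handles with one stack-allocated block holding data, self, and holder, and AccessorInfo is handed args.end() so it can reach all three at fixed offsets from a single pointer. A hedged sketch of the layout idea; the slot order and negative indexing here are assumptions for illustration, not V8's actual ABI:

    #include <cassert>

    struct Object { int tag; };

    // One stack-allocated block holding the three values an accessor
    // callback needs. end() points one past the last slot, mirroring
    // how AccessorInfo is constructed from args.end() above.
    class CustomArgumentsSketch {
     public:
      CustomArgumentsSketch(Object* data, Object* self, Object* holder) {
        values_[0] = data;
        values_[1] = self;
        values_[2] = holder;
      }
      Object** end() { return values_ + 3; }

     private:
      Object* values_[3];
    };

    // The info object only stores the end pointer and reaches the slots
    // at fixed negative offsets (this indexing scheme is an assumption).
    class AccessorInfoSketch {
     public:
      explicit AccessorInfoSketch(Object** args_end) : args_(args_end) {}
      Object* data() const { return args_[-3]; }
      Object* self() const { return args_[-2]; }
      Object* holder() const { return args_[-1]; }

     private:
      Object** args_;
    };

    int main() {
      Object data = {1}, self = {2}, holder = {3};
      CustomArgumentsSketch args(&data, &self, &holder);
      AccessorInfoSketch info(args.end());
      assert(info.data()->tag == 1);
      assert(info.holder()->tag == 3);
      return 0;
    }
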
@@ -1538,11 +1537,9 @@
   Handle<Object> value_handle(value);
   Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
   if (!interceptor->setter()->IsUndefined()) {
-    Handle<Object> data_handle(interceptor->data());
     LOG(ApiNamedPropertyAccess("interceptor-named-set", this, name));
-    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(this_handle));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
     v8::NamedPropertySetter setter =
         v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
     v8::Handle<v8::Value> result;
@@ -1605,14 +1602,10 @@
     Object* call_obj = data->setter();
     v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
     if (call_fun == NULL) return value;
-    Handle<JSObject> self(this);
-    Handle<JSObject> holder_handle(JSObject::cast(holder));
     Handle<String> key(name);
-    Handle<Object> fun_data(data->data());
     LOG(ApiNamedPropertyAccess("store", this, name));
-    v8::AccessorInfo info(v8::Utils::ToLocal(self),
-                          v8::Utils::ToLocal(fun_data),
-                          v8::Utils::ToLocal(holder_handle));
+    CustomArguments args(data->data(), this, JSObject::cast(holder));
+    v8::AccessorInfo info(args.end());
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
@@ -2036,10 +2029,8 @@
   Handle<JSObject> receiver_handle(receiver);
   Handle<JSObject> holder_handle(this);
   Handle<String> name_handle(name);
-  Handle<Object> data_handle(interceptor->data());
-  v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
-                        v8::Utils::ToLocal(data_handle),
-                        v8::Utils::ToLocal(holder_handle));
+  CustomArguments args(interceptor->data(), receiver, this);
+  v8::AccessorInfo info(args.end());
   if (!interceptor->query()->IsUndefined()) {
     v8::NamedPropertyQuery query =
         v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
@@ -2307,11 +2298,9 @@
   if (!interceptor->deleter()->IsUndefined()) {
     v8::NamedPropertyDeleter deleter =
         v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
-    Handle<Object> data_handle(interceptor->data());
     LOG(ApiNamedPropertyAccess("interceptor-named-delete", *this_handle, name));
-    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(this_handle));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
     v8::Handle<v8::Boolean> result;
     {
       // Leaving JavaScript.
@@ -2370,11 +2359,9 @@
   v8::IndexedPropertyDeleter deleter =
       v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
   Handle<JSObject> this_handle(this);
-  Handle<Object> data_handle(interceptor->data());
   LOG(ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
-  v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
-                        v8::Utils::ToLocal(data_handle),
-                        v8::Utils::ToLocal(this_handle));
+  CustomArguments args(interceptor->data(), this, this);
+  v8::AccessorInfo info(args.end());
   v8::Handle<v8::Boolean> result;
   {
     // Leaving JavaScript.
@@ -3971,35 +3958,75 @@
 }
 
 
-FlatStringReader* FlatStringReader::top_ = NULL;
+Relocatable* Relocatable::top_ = NULL;
+
+
+void Relocatable::PostGarbageCollectionProcessing() {
+  Relocatable* current = top_;
+  while (current != NULL) {
+    current->PostGarbageCollection();
+    current = current->prev_;
+  }
+}
+
+
+// Reserve space for statics needing saving and restoring.
+int Relocatable::ArchiveSpacePerThread() {
+  return sizeof(top_);
+}
+
+
+// Archive statics that are thread local.
+char* Relocatable::ArchiveState(char* to) {
+  *reinterpret_cast<Relocatable**>(to) = top_;
+  top_ = NULL;
+  return to + ArchiveSpacePerThread();
+}
+
+
+// Restore statics that are thread local.
+char* Relocatable::RestoreState(char* from) {
+  top_ = *reinterpret_cast<Relocatable**>(from);
+  return from + ArchiveSpacePerThread();
+}
+
+
+char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+  Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
+  Iterate(v, top);
+  return thread_storage + ArchiveSpacePerThread();
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v) {
+  Iterate(v, top_);
+}
+
+
+void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+  Relocatable* current = top;
+  while (current != NULL) {
+    current->IterateInstance(v);
+    current = current->prev_;
+  }
+}
 
 
 FlatStringReader::FlatStringReader(Handle<String> str)
     : str_(str.location()),
-      length_(str->length()),
-      prev_(top_) {
-  top_ = this;
-  RefreshState();
+      length_(str->length()) {
+  PostGarbageCollection();
 }
 
 
 FlatStringReader::FlatStringReader(Vector<const char> input)
-    : str_(NULL),
+    : str_(0),
       is_ascii_(true),
       length_(input.length()),
-      start_(input.start()),
-      prev_(top_) {
-  top_ = this;
-}
+      start_(input.start()) { }
 
 
-FlatStringReader::~FlatStringReader() {
-  ASSERT_EQ(top_, this);
-  top_ = prev_;
-}
-
-
-void FlatStringReader::RefreshState() {
+void FlatStringReader::PostGarbageCollection() {
   if (str_ == NULL) return;
   Handle<String> str(str_);
   ASSERT(str->IsFlat());
@@ -4012,15 +4039,6 @@
 }
 
 
-void FlatStringReader::PostGarbageCollectionProcessing() {
-  FlatStringReader* current = top_;
-  while (current != NULL) {
-    current->RefreshState();
-    current = current->prev_;
-  }
-}
-
-
 void StringInputBuffer::Seek(unsigned pos) {
   Reset(pos, input_);
 }
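
Note: the ArchiveState/RestoreState pair lets the chain of live Relocatables travel with its thread. On a thread switch the static top_ pointer is copied into a per-thread buffer and cleared, then reinstalled when the thread resumes. A compact sketch of that save/restore contract (buffer handling simplified):

    #include <cassert>
    #include <cstring>

    struct Node { Node* prev; };
    static Node* top_ = NULL;

    // Save the thread-local chain head into 'to' and reset it, returning
    // the next free byte, the same shape as Relocatable::ArchiveState.
    static char* ArchiveState(char* to) {
      std::memcpy(to, &top_, sizeof(top_));
      top_ = NULL;
      return to + sizeof(top_);
    }

    // Reinstall a previously archived chain head.
    static char* RestoreState(char* from) {
      std::memcpy(&top_, from, sizeof(top_));
      return from + sizeof(top_);
    }

    int main() {
      Node n = {NULL};
      top_ = &n;

      char buffer[sizeof(Node*)];
      ArchiveState(buffer);
      assert(top_ == NULL);  // the outgoing thread sees an empty chain

      RestoreState(buffer);
      assert(top_ == &n);    // the resumed thread gets its chain back
      return 0;
    }
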
@@ -5033,15 +5051,16 @@
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                   RelocInfo::kApplyMask;
+  Assembler* origin = desc.origin;  // Needed to find target_object on X64.
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
-      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
-      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
       it.rinfo()->set_target_address(code->instruction_start());
     } else {
@@ -5429,10 +5448,8 @@
   Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
   Handle<JSObject> receiver_handle(receiver);
   Handle<JSObject> holder_handle(this);
-  Handle<Object> data_handle(interceptor->data());
-  v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
-                        v8::Utils::ToLocal(data_handle),
-                        v8::Utils::ToLocal(holder_handle));
+  CustomArguments args(interceptor->data(), receiver, this);
+  v8::AccessorInfo info(args.end());
   if (!interceptor->query()->IsUndefined()) {
     v8::IndexedPropertyQuery query =
         v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
@@ -5564,11 +5581,9 @@
   if (!interceptor->setter()->IsUndefined()) {
     v8::IndexedPropertySetter setter =
         v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
-    Handle<Object> data_handle(interceptor->data());
     LOG(ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
-    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(this_handle));
+    CustomArguments args(interceptor->data(), this, this);
+    v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
@@ -5836,13 +5851,11 @@
   Handle<JSObject> holder_handle(this);
 
   if (!interceptor->getter()->IsUndefined()) {
-    Handle<Object> data_handle(interceptor->data());
     v8::IndexedPropertyGetter getter =
         v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
     LOG(ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
-    v8::AccessorInfo info(v8::Utils::ToLocal(this_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(holder_handle));
+    CustomArguments args(interceptor->data(), receiver, this);
+    v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
@@ -6074,15 +6087,13 @@
   Handle<JSObject> receiver_handle(receiver);
   Handle<JSObject> holder_handle(this);
   Handle<String> name_handle(name);
-  Handle<Object> data_handle(interceptor->data());
 
   if (!interceptor->getter()->IsUndefined()) {
     v8::NamedPropertyGetter getter =
         v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
     LOG(ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
-    v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(holder_handle));
+    CustomArguments args(interceptor->data(), receiver, this);
+    v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
diff --git a/src/objects.h b/src/objects.h
index 5de9afa..e9430f5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -211,7 +211,7 @@
 // NOTE: Everything following JS_VALUE_TYPE is considered a
 // JSObject for GC purposes. The first four entries here have typeof
 // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V)                   \
+#define INSTANCE_TYPE_LIST_ALL(V)               \
   V(SHORT_SYMBOL_TYPE)                          \
   V(MEDIUM_SYMBOL_TYPE)                         \
   V(LONG_SYMBOL_TYPE)                           \
@@ -282,8 +282,6 @@
   V(OBJECT_TEMPLATE_INFO_TYPE)                  \
   V(SIGNATURE_INFO_TYPE)                        \
   V(TYPE_SWITCH_INFO_TYPE)                      \
-  V(DEBUG_INFO_TYPE)                            \
-  V(BREAK_POINT_INFO_TYPE)                      \
   V(SCRIPT_TYPE)                                \
                                                 \
   V(JS_VALUE_TYPE)                              \
@@ -297,6 +295,17 @@
                                                 \
   V(JS_FUNCTION_TYPE)                           \
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)          \
+  V(DEBUG_INFO_TYPE)                            \
+  V(BREAK_POINT_INFO_TYPE)
+#else
+#define INSTANCE_TYPE_LIST_DEBUGGER(V)
+#endif
+
+#define INSTANCE_TYPE_LIST(V)                   \
+  INSTANCE_TYPE_LIST_ALL(V)                     \
+  INSTANCE_TYPE_LIST_DEBUGGER(V)
 
 
 // Since string types are not consecutive, this macro is used to
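
Note: the split keeps the debugger-only instance types out of the list when ENABLE_DEBUGGER_SUPPORT is off, while every consumer keeps using the single INSTANCE_TYPE_LIST entry point. The same X-macro composition pattern in miniature, with invented names:

    #include <cstdio>

    #define COLOR_LIST_ALL(V)   \
      V(RED)                    \
      V(GREEN)

    #ifdef ENABLE_EXTRA_COLORS
    #define COLOR_LIST_EXTRA(V) \
      V(BLUE)
    #else
    #define COLOR_LIST_EXTRA(V)
    #endif

    // Consumers only ever see the combined list; the optional part
    // contributes entries only when its feature macro is defined.
    #define COLOR_LIST(V)       \
      COLOR_LIST_ALL(V)         \
      COLOR_LIST_EXTRA(V)

    #define DECLARE_ENUM(name) k##name,
    enum Color { COLOR_LIST(DECLARE_ENUM) kColorCount };
    #undef DECLARE_ENUM

    int main() {
      std::printf("%d colors\n", static_cast<int>(kColorCount));
      return 0;
    }
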
@@ -673,8 +682,10 @@
   OBJECT_TEMPLATE_INFO_TYPE,
   SIGNATURE_INFO_TYPE,
   TYPE_SWITCH_INFO_TYPE,
+#ifdef ENABLE_DEBUGGER_SUPPORT
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
+#endif
   SCRIPT_TYPE,
 
   JS_VALUE_TYPE,
@@ -751,14 +762,17 @@
   inline bool IsHeapNumber();
   inline bool IsString();
   inline bool IsSymbol();
+#ifdef DEBUG
+  // See objects-inl.h for more details
   inline bool IsSeqString();
   inline bool IsSlicedString();
   inline bool IsExternalString();
-  inline bool IsConsString();
   inline bool IsExternalTwoByteString();
   inline bool IsExternalAsciiString();
   inline bool IsSeqTwoByteString();
   inline bool IsSeqAsciiString();
+#endif  // DEBUG
+  inline bool IsConsString();
 
   inline bool IsNumber();
   inline bool IsByteArray();
@@ -4205,25 +4219,47 @@
 };
 
 
+// Utility superclass for stack-allocated objects that must be updated
+// on GC.  It provides two ways for the GC to update instances: either
+// by iterating over them or by notifying them after collection.
+class Relocatable BASE_EMBEDDED {
+ public:
+  inline Relocatable() : prev_(top_) { top_ = this; }
+  virtual ~Relocatable() {
+    ASSERT_EQ(top_, this);
+    top_ = prev_;
+  }
+  virtual void IterateInstance(ObjectVisitor* v) { }
+  virtual void PostGarbageCollection() { }
+
+  static void PostGarbageCollectionProcessing();
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+  static void Iterate(ObjectVisitor* v);
+  static void Iterate(ObjectVisitor* v, Relocatable* top);
+  static char* Iterate(ObjectVisitor* v, char* t);
+ private:
+  static Relocatable* top_;
+  Relocatable* prev_;
+};
+
+
 // A flat string reader provides random access to the contents of a
 // string independent of the character width of the string.  The handle
 // must be valid as long as the reader is being used.
-class FlatStringReader BASE_EMBEDDED {
+class FlatStringReader : public Relocatable {
  public:
   explicit FlatStringReader(Handle<String> str);
   explicit FlatStringReader(Vector<const char> input);
-  ~FlatStringReader();
-  void RefreshState();
+  void PostGarbageCollection();
   inline uc32 Get(int index);
   int length() { return length_; }
-  static void PostGarbageCollectionProcessing();
  private:
   String** str_;
   bool is_ascii_;
   int length_;
   const void* start_;
-  FlatStringReader* prev_;
-  static FlatStringReader* top_;
 };
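
Note: a subclass such as FlatStringReader only overrides PostGarbageCollection (or IterateInstance); the base constructor and destructor maintain the chain automatically, which is why instances must be stack-allocated so destruction order mirrors the chain. A hedged usage sketch with a cached value that is recomputed after every collection (names are illustrative):

    #include <cassert>

    // Minimal re-creation of the Relocatable chain, for illustration only.
    class RelocatableSketch {
     public:
      RelocatableSketch() : prev_(top_) { top_ = this; }
      virtual ~RelocatableSketch() { top_ = prev_; }
      virtual void PostGarbageCollection() {}

      // Called by the GC after objects may have moved.
      static void PostGarbageCollectionProcessing() {
        for (RelocatableSketch* cur = top_; cur != NULL; cur = cur->prev_)
          cur->PostGarbageCollection();
      }

     private:
      static RelocatableSketch* top_;
      RelocatableSketch* prev_;
    };
    RelocatableSketch* RelocatableSketch::top_ = NULL;

    // A reader caching a value that must be refreshed after GC, in the
    // spirit of FlatStringReader re-deriving its character pointer.
    class CachedReader : public RelocatableSketch {
     public:
      explicit CachedReader(int* source) : source_(source) {
        PostGarbageCollection();
      }
      virtual void PostGarbageCollection() { cached_ = *source_; }
      int value() const { return cached_; }

     private:
      int* source_;
      int cached_;
    };

    int main() {
      int source = 1;
      CachedReader reader(&source);
      source = 2;  // simulate the GC updating the underlying data
      RelocatableSketch::PostGarbageCollectionProcessing();
      assert(reader.value() == 2);
      return 0;
    }
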
 
 
@@ -4389,6 +4425,9 @@
   void JSArrayVerify();
 #endif
 
+  // Number of element slots to pre-allocate for an empty array.
+  static const int kPreallocatedArrayElements = 4;
+
   // Layout description.
   static const int kLengthOffset = JSObject::kHeaderSize;
   static const int kSize = kLengthOffset + kPointerSize;
diff --git a/src/parser.cc b/src/parser.cc
index 0abb9ed..3b24687 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -798,12 +798,6 @@
     return Call::sentinel();
   }
 
-  virtual Expression* NewCallEval(Expression* expression,
-                                  ZoneList<Expression*>* arguments,
-                                  int pos) {
-    return CallEval::sentinel();
-  }
-
   virtual Statement* EmptyStatement() {
     return NULL;
   }
@@ -854,12 +848,6 @@
     return new Call(expression, arguments, pos);
   }
 
-  virtual Expression* NewCallEval(Expression* expression,
-                                  ZoneList<Expression*>* arguments,
-                                  int pos) {
-    return new CallEval(expression, arguments, pos);
-  }
-
   virtual Statement* EmptyStatement();
 };
 
@@ -1196,7 +1184,6 @@
 bool Parser::PreParseProgram(Handle<String> source,
                              unibrow::CharacterStream* stream) {
   HistogramTimerScope timer(&Counters::pre_parse);
-  StackGuard guard;
   AssertNoZoneAllocation assert_no_zone_allocation;
   AssertNoAllocation assert_no_allocation;
   NoHandleAllocation no_handle_allocation;
@@ -1937,31 +1924,20 @@
 
 
 Statement* Parser::ParseFunctionDeclaration(bool* ok) {
-  // Parse a function literal. We may or may not have a function name.
-  // If we have a name we use it as the variable name for the function
-  // (a function declaration) and not as the function name of a function
-  // expression.
-
+  // FunctionDeclaration ::
+  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int function_token_position = scanner().location().beg_pos;
-
-  Handle<String> name;
-  if (peek() == Token::IDENTIFIER) name = ParseIdentifier(CHECK_OK);
-  FunctionLiteral* fun = ParseFunctionLiteral(name, function_token_position,
-                                              DECLARATION, CHECK_OK);
-
-  if (name.is_null()) {
-    // We don't have a name - it is always an anonymous function
-    // expression.
-    return NEW(ExpressionStatement(fun));
-  } else {
-    // We have a name so even if we're not at the top-level of the
-    // global or a function scope, we treat is as such and introduce
-    // the function with it's initial value upon entering the
-    // corresponding scope.
-    Declare(name, Variable::VAR, fun, true, CHECK_OK);
-    return factory()->EmptyStatement();
-  }
+  Handle<String> name = ParseIdentifier(CHECK_OK);
+  FunctionLiteral* fun = ParseFunctionLiteral(name,
+                                              function_token_position,
+                                              DECLARATION,
+                                              CHECK_OK);
+  // Even if we're not at the top-level of the global or a function
+  // scope, we treat it as such and introduce the function with its
+  // initial value upon entering the corresponding scope.
+  Declare(name, Variable::VAR, fun, true, CHECK_OK);
+  return factory()->EmptyStatement();
 }
 
 
@@ -2672,25 +2648,13 @@
     } else {
       Expression* expression = ParseExpression(false, CHECK_OK);
       if (peek() == Token::IN) {
-        // Report syntax error if the expression is an invalid
-        // left-hand side expression.
+        // Signal a reference error if the expression is an invalid
+        // left-hand side expression.  We could report this as a syntax
+        // error here but for compatibility with JSC we choose to report
+        // the error at runtime.
         if (expression == NULL || !expression->IsValidLeftHandSide()) {
-          if (expression != NULL && expression->AsCall() != NULL) {
-            // According to ECMA-262 host function calls are permitted to
-            // return references.  This cannot happen in our system so we
-            // will always get an error.  We could report this as a syntax
-            // error here but for compatibility with KJS and SpiderMonkey we
-            // choose to report the error at runtime.
-            Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
-            expression = NewThrowReferenceError(type);
-          } else {
-            // Invalid left hand side expressions that are not function
-            // calls are reported as syntax errors at compile time.
-            ReportMessage("invalid_lhs_in_for_in",
-                          Vector<const char*>::empty());
-            *ok = false;
-            return NULL;
-          }
+          Handle<String> type = Factory::invalid_lhs_in_for_in_symbol();
+          expression = NewThrowReferenceError(type);
         }
         ForInStatement* loop = NEW(ForInStatement(labels));
         Target target(this, loop);
@@ -2767,30 +2731,15 @@
     return expression;
   }
 
+  // Signal a reference error if the expression is an invalid left-hand
+  // side expression.  We could report this as a syntax error here but
+  // for compatibility with JSC we choose to report the error at
+  // runtime.
   if (expression == NULL || !expression->IsValidLeftHandSide()) {
-    if (expression != NULL && expression->AsCall() != NULL) {
-      // According to ECMA-262 host function calls are permitted to
-      // return references.  This cannot happen in our system so we
-      // will always get an error.  We could report this as a syntax
-      // error here but for compatibility with KJS and SpiderMonkey we
-      // choose to report the error at runtime.
-      Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
-      expression = NewThrowReferenceError(type);
-    } else {
-      // Invalid left hand side expressions that are not function
-      // calls are reported as syntax errors at compile time.
-      //
-      // NOTE: KJS sometimes delay the error reporting to runtime. If
-      // we want to be completely compatible we should do the same.
-      // For example: "(x++) = 42" gives a reference error at runtime
-      // with KJS whereas we report a syntax error at compile time.
-      ReportMessage("invalid_lhs_in_assignment", Vector<const char*>::empty());
-      *ok = false;
-      return NULL;
-    }
+    Handle<String> type = Factory::invalid_lhs_in_assignment_symbol();
+    expression = NewThrowReferenceError(type);
   }
 
-
   Token::Value op = Next();  // Get assignment operator.
   int pos = scanner().location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2963,45 +2912,37 @@
   Token::Value op = peek();
   if (Token::IsUnaryOp(op)) {
     op = Next();
-    Expression* x = ParseUnaryExpression(CHECK_OK);
+    Expression* expression = ParseUnaryExpression(CHECK_OK);
 
     // Compute some expressions involving only number literals.
-    if (x && x->AsLiteral() && x->AsLiteral()->handle()->IsNumber()) {
-      double x_val = x->AsLiteral()->handle()->Number();
+    if (expression != NULL && expression->AsLiteral() &&
+        expression->AsLiteral()->handle()->IsNumber()) {
+      double value = expression->AsLiteral()->handle()->Number();
       switch (op) {
         case Token::ADD:
-          return x;
+          return expression;
         case Token::SUB:
-          return NewNumberLiteral(-x_val);
+          return NewNumberLiteral(-value);
         case Token::BIT_NOT:
-          return NewNumberLiteral(~DoubleToInt32(x_val));
+          return NewNumberLiteral(~DoubleToInt32(value));
         default: break;
       }
     }
 
-    return NEW(UnaryOperation(op, x));
+    return NEW(UnaryOperation(op, expression));
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
-    Expression* x = ParseUnaryExpression(CHECK_OK);
-    if (x == NULL || !x->IsValidLeftHandSide()) {
-      if (x != NULL && x->AsCall() != NULL) {
-        // According to ECMA-262 host function calls are permitted to
-        // return references.  This cannot happen in our system so we
-        // will always get an error.  We could report this as a syntax
-        // error here but for compatibility with KJS and SpiderMonkey we
-        // choose to report the error at runtime.
-        Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
-        x = NewThrowReferenceError(type);
-      } else {
-        // Invalid left hand side expressions that are not function
-        // calls are reported as syntax errors at compile time.
-        ReportMessage("invalid_lhs_in_prefix_op", Vector<const char*>::empty());
-        *ok = false;
-        return NULL;
-      }
+    Expression* expression = ParseUnaryExpression(CHECK_OK);
+    // Signal a reference error if the expression is an invalid
+    // left-hand side expression.  We could report this as a syntax
+    // error here but for compatibility with JSC we choose to report the
+    // error at runtime.
+    if (expression == NULL || !expression->IsValidLeftHandSide()) {
+      Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
+      expression = NewThrowReferenceError(type);
     }
-    return NEW(CountOperation(true /* prefix */, op, x));
+    return NEW(CountOperation(true /* prefix */, op, expression));
 
   } else {
     return ParsePostfixExpression(ok);
@@ -3013,30 +2954,20 @@
   // PostfixExpression ::
   //   LeftHandSideExpression ('++' | '--')?
 
-  Expression* result = ParseLeftHandSideExpression(CHECK_OK);
+  Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
-    if (result == NULL || !result->IsValidLeftHandSide()) {
-      if (result != NULL && result->AsCall() != NULL) {
-        // According to ECMA-262 host function calls are permitted to
-        // return references.  This cannot happen in our system so we
-        // will always get an error.  We could report this as a syntax
-        // error here but for compatibility with KJS and SpiderMonkey we
-        // choose to report the error at runtime.
-        Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
-        result = NewThrowReferenceError(type);
-      } else {
-        // Invalid left hand side expressions that are not function
-        // calls are reported as syntax errors at compile time.
-        ReportMessage("invalid_lhs_in_postfix_op",
-                      Vector<const char*>::empty());
-        *ok = false;
-        return NULL;
-      }
+    // Signal a reference error if the expression is an invalid
+    // left-hand side expression.  We could report this as a syntax
+    // error here but for compatibility with JSC we choose to report the
+    // error at runtime.
+    if (expression == NULL || !expression->IsValidLeftHandSide()) {
+      Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
+      expression = NewThrowReferenceError(type);
     }
     Token::Value next = Next();
-    result = NEW(CountOperation(false /* postfix */, next, result));
+    expression = NEW(CountOperation(false /* postfix */, next, expression));
   }
-  return result;
+  return expression;
 }
 
 
@@ -3074,8 +3005,6 @@
         // declared in the current scope chain. These calls are marked as
         // potentially direct eval calls. Whether they are actually direct calls
         // to eval is determined at run time.
-
-        bool is_potentially_direct_eval = false;
         if (!is_pre_parsing_) {
           VariableProxy* callee = result->AsVariableProxy();
           if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
@@ -3083,16 +3012,10 @@
             Variable* var = top_scope_->Lookup(name);
             if (var == NULL) {
               top_scope_->RecordEvalCall();
-              is_potentially_direct_eval = true;
             }
           }
         }
-
-        if (is_potentially_direct_eval) {
-          result = factory()->NewCallEval(result, args, pos);
-        } else {
-          result = factory()->NewCall(result, args, pos);
-        }
+        result = factory()->NewCall(result, args, pos);
         break;
       }
 
@@ -4840,8 +4763,6 @@
                  bool multiline,
                  RegExpCompileData* result) {
   ASSERT(result != NULL);
-  // Make sure we have a stack guard.
-  StackGuard guard;
   RegExpParser parser(input, &result->error, multiline);
   RegExpTree* tree = parser.ParsePattern();
   if (parser.failed()) {
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 44d283b..73d6eeb 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -554,14 +554,18 @@
     // Extracting the sample from the context is extremely machine dependent.
     ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
     mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined (__arm__) || defined(__thumb__)
-    sample.pc = mcontext.mc_r15;
-    sample.sp = mcontext.mc_r13;
-    sample.fp = mcontext.mc_r11;
-#else
+#if V8_HOST_ARCH_IA32
     sample.pc = mcontext.mc_eip;
     sample.sp = mcontext.mc_esp;
     sample.fp = mcontext.mc_ebp;
+#elif V8_HOST_ARCH_X64
+    sample.pc = mcontext.mc_rip;
+    sample.sp = mcontext.mc_rsp;
+    sample.fp = mcontext.mc_rbp;
+#elif V8_HOST_ARCH_ARM
+    sample.pc = mcontext.mc_r15;
+    sample.sp = mcontext.mc_r13;
+    sample.fp = mcontext.mc_r11;
 #endif
     active_sampler_->SampleStack(&sample);
   }
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 596b0fb..0b236a5 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -42,6 +42,7 @@
 #include <mach/mach.h>
 #include <mach/semaphore.h>
 #include <mach/task.h>
+#include <mach/vm_statistics.h>
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/types.h>
@@ -123,12 +124,22 @@
 }
 
 
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in
+// memory analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  void* mbase = mmap(NULL, msize, prot,
+                     MAP_PRIVATE | MAP_ANON,
+                     kMmapFd, kMmapFdOffset);
   if (mbase == MAP_FAILED) {
     LOG(StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
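
Note: VM_MAKE_TAG(255) passes a VM region tag through the fd argument of an anonymous mmap; Darwin interprets it as a tag rather than a file descriptor, so tools such as vmmap attribute the memory to V8. A macOS-only sketch of the call, with error handling trimmed:

    // macOS-only sketch: tag anonymous mappings so vmmap(1) can
    // attribute them to the process's allocator.
    #include <sys/mman.h>
    #include <mach/vm_statistics.h>
    #include <cstdio>

    int main() {
      const int kMmapFd = VM_MAKE_TAG(255);  // user-defined tag range ends at 255
      const off_t kMmapFdOffset = 0;
      size_t size = 4096;
      void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, kMmapFd, kMmapFdOffset);
      if (p == MAP_FAILED) {
        std::perror("mmap");
        return 1;
      }
      std::printf("mapped %lu bytes at %p with tag 255\n",
                  static_cast<unsigned long>(size), p);
      munmap(p, size);
      return 0;
    }
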
@@ -280,9 +291,6 @@
 }
 
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
 
 
 VirtualMemory::VirtualMemory(size_t size) {
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 7a8af40..bf66c4b 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -358,11 +358,6 @@
 }
 
 
-void PrettyPrinter::VisitCallEval(CallEval* node) {
-  VisitCall(node);
-}
-
-
 void PrettyPrinter::VisitCallNew(CallNew* node) {
   Print("new (");
   Visit(node->expression());
@@ -1040,11 +1035,6 @@
 }
 
 
-void AstPrinter::VisitCallEval(CallEval* node) {
-  VisitCall(node);
-}
-
-
 void AstPrinter::VisitCallNew(CallNew* node) {
   IndentedScope indent("CALL NEW");
   Visit(node->expression());
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index 83cb6e4..87a674d 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -69,6 +69,14 @@
 }
 
 
+void RegExpStack::ThreadLocal::Free() {
+  if (thread_local_.memory_size_ > 0) {
+    DeleteArray(thread_local_.memory_);
+    thread_local_ = ThreadLocal();
+  }
+}
+
+
 Address RegExpStack::EnsureCapacity(size_t size) {
   if (size > kMaximumStackSize) return NULL;
   if (size < kMinimumStackSize) size = kMinimumStackSize;
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index 99cf33c..319ab28 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -71,6 +71,7 @@
   static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
   static char* ArchiveStack(char* to);
   static char* RestoreStack(char* from);
+  static void FreeThreadResources() { thread_local_.Free(); }
 
  private:
   // Artificial limit used when no memory has been allocated.
@@ -92,6 +93,7 @@
     Address memory_;
     size_t memory_size_;
     Address limit_;
+    void Free();
   };
 
   // Resets the buffer if it has grown beyond the default/minimum size.
diff --git a/src/rewriter.cc b/src/rewriter.cc
index d6ea68e..11fc071 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -383,12 +383,6 @@
 }
 
 
-void AstOptimizer::VisitCallEval(CallEval* node) {
-  Visit(node->expression());
-  OptimizeArguments(node->arguments());
-}
-
-
 void AstOptimizer::VisitCallNew(CallNew* node) {
   Visit(node->expression());
   OptimizeArguments(node->arguments());
@@ -759,12 +753,6 @@
 }
 
 
-void Processor::VisitCallEval(CallEval* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
 void Processor::VisitCallNew(CallNew* node) {
   USE(node);
   UNREACHABLE();
diff --git a/src/runtime.cc b/src/runtime.cc
index 06b61e7..4e1940d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -3020,8 +3020,20 @@
   // Some fast paths through GetKeysInFixedArrayFor reuse a cached
   // property array and since the result is mutable we have to create
   // a fresh clone on each invocation.
-  Handle<FixedArray> copy = Factory::NewFixedArray(contents->length());
-  contents->CopyTo(0, *copy, 0, contents->length());
+  int length = contents->length();
+  Handle<FixedArray> copy = Factory::NewFixedArray(length);
+  for (int i = 0; i < length; i++) {
+    Object* entry = contents->get(i);
+    if (entry->IsString()) {
+      copy->set(i, entry);
+    } else {
+      ASSERT(entry->IsNumber());
+      HandleScope scope;
+      Handle<Object> entry_handle(entry);
+      Handle<Object> entry_str = Factory::NumberToString(entry_handle);
+      copy->set(i, *entry_str);
+    }
+  }
   return *Factory::NewJSArrayWithElements(copy);
 }
 
@@ -3587,27 +3599,7 @@
   Object* number = args[0];
   RUNTIME_ASSERT(number->IsNumber());
 
-  Object* cached = Heap::GetNumberStringCache(number);
-  if (cached != Heap::undefined_value()) {
-    return cached;
-  }
-
-  char arr[100];
-  Vector<char> buffer(arr, ARRAY_SIZE(arr));
-  const char* str;
-  if (number->IsSmi()) {
-    int num = Smi::cast(number)->value();
-    str = IntToCString(num, buffer);
-  } else {
-    double num = HeapNumber::cast(number)->value();
-    str = DoubleToCString(num, buffer);
-  }
-  Object* result = Heap::AllocateStringFromAscii(CStrVector(str));
-
-  if (!result->IsFailure()) {
-    Heap::SetNumberStringCache(number, String::cast(result));
-  }
-  return result;
+  return Heap::NumberToString(number);
 }
 
 
@@ -7148,7 +7140,7 @@
   // the function being debugged.
   // function(arguments,__source__) {return eval(__source__);}
   static const char* source_str =
-      "function(arguments,__source__){return eval(__source__);}";
+      "(function(arguments,__source__){return eval(__source__);})";
   static const int source_str_length = strlen(source_str);
   Handle<String> function_source =
       Factory::NewStringFromAscii(Vector<const char>(source_str,
diff --git a/src/serialize.cc b/src/serialize.cc
index b6a9d94..94cd02a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -922,7 +922,9 @@
       serializer_(serializer),
       reference_encoder_(serializer->reference_encoder_),
       offsets_(8),
-      addresses_(8) {
+      addresses_(8),
+      offsets_32_bit_(0),
+      data_32_bit_(0) {
   }
 
   virtual void VisitPointers(Object** start, Object** end) {
@@ -939,8 +941,12 @@
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     Address encoded_target = serializer_->GetSavedAddress(target);
-    offsets_.Add(rinfo->target_address_address() - obj_address_);
-    addresses_.Add(encoded_target);
+    // All calls and jumps target code objects whose encoded addresses
+    // fit in 32 bits.
+    offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
+    uint32_t small_target =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
+    ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
+    data_32_bit_.Add(small_target);
   }
 
 
@@ -965,6 +971,10 @@
     for (int i = 0; i < offsets_.length(); i++) {
       memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
     }
+    for (int i = 0; i < offsets_32_bit_.length(); i++) {
+      memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
+             sizeof(uint32_t));
+    }
   }
 
  private:
@@ -973,6 +983,10 @@
   ExternalReferenceEncoder* reference_encoder_;
   List<int> offsets_;
   List<Address> addresses_;
+  // Some updates are 32-bit even on a 64-bit platform.
+  // We keep a separate list of them on 64-bit platforms.
+  List<int> offsets_32_bit_;
+  List<uint32_t> data_32_bit_;
 };
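
Note: on 64-bit hosts the encoded code target still fits in 32 bits, so the serializer narrows it and asserts that the round trip is lossless. The same checked-narrowing idiom in isolation:

    #include <cassert>
    #include <stdint.h>

    // Narrow a pointer-sized value to 32 bits, asserting no information
    // is lost, mirroring the small_target check in the serializer above.
    uint32_t CheckedNarrow(uintptr_t value) {
      uint32_t narrow = static_cast<uint32_t>(value);
      assert(static_cast<uintptr_t>(narrow) == value);  // round trip must agree
      return narrow;
    }

    int main() {
      assert(CheckedNarrow(0x12345678u) == 0x12345678u);
      return 0;
    }
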
 
 
@@ -1062,7 +1076,7 @@
   // No active threads.
   CHECK_EQ(NULL, ThreadState::FirstInUse());
   // No active or weak handles.
-  CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
+  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
   CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
   // We need a counter function during serialization to resolve the
   // references to counters in the code on the heap.
@@ -1395,7 +1409,7 @@
   // No active threads.
   ASSERT_EQ(NULL, ThreadState::FirstInUse());
   // No active handles.
-  ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
+  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
   reference_decoder_ = new ExternalReferenceDecoder();
   // By setting linear allocation only, we forbid the use of free list
   // allocation which is not predicted by SimulatedAddress.
@@ -1432,7 +1446,9 @@
 
 void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-  Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+  // On all platforms, the encoded code object address is only 32 bits.
+  Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
+      reinterpret_cast<Address>(rinfo->target_object_address())));
   Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
   rinfo->set_target_address(target_object->instruction_start());
 }
@@ -1632,8 +1648,7 @@
   obj->IterateBody(type, size, this);
 
   if (type == CODE_TYPE) {
-    Code* code = Code::cast(obj);
-    LOG(CodeMoveEvent(a, code->address()));
+    LOG(CodeMoveEvent(a, obj->address()));
   }
   objects_++;
   return o;
@@ -1664,7 +1679,6 @@
 
   // Encoded addresses of HeapObjects always have 'HeapObject' tags.
   ASSERT(o->IsHeapObject());
-
   switch (GetSpace(encoded)) {
     // For Map space and Old space, we cache the known Pages in map_pages,
     // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
diff --git a/src/spaces.cc b/src/spaces.cc
index 998debb..43abaa4 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -145,6 +145,128 @@
 #endif
 
 // -----------------------------------------------------------------------------
+// CodeRange
+
+List<CodeRange::FreeBlock> CodeRange::free_list_(0);
+List<CodeRange::FreeBlock> CodeRange::allocation_list_(0);
+int CodeRange::current_allocation_block_index_ = 0;
+VirtualMemory* CodeRange::code_range_ = NULL;
+
+
+bool CodeRange::Setup(const size_t requested) {
+  ASSERT(code_range_ == NULL);
+
+  code_range_ = new VirtualMemory(requested);
+  CHECK(code_range_ != NULL);
+  if (!code_range_->IsReserved()) {
+    delete code_range_;
+    code_range_ = NULL;
+    return false;
+  }
+
+  // We are sure that we have mapped a block of the requested size.
+  ASSERT(code_range_->size() == requested);
+  LOG(NewEvent("CodeRange", code_range_->address(), requested));
+  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
+  current_allocation_block_index_ = 0;
+  return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+                                       const FreeBlock* right) {
+  // The entire point of CodeRange is that the difference between two
+  // addresses in the range can be represented as a signed 32-bit int,
+  // so the cast is semantically correct.
+  return static_cast<int>(left->start - right->start);
+}
+
+
+void CodeRange::GetNextAllocationBlock(size_t requested) {
+  for (current_allocation_block_index_++;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Sort and merge the free blocks on the free list and the allocation list.
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
+
+  for (current_allocation_block_index_ = 0;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return;  // Found a large enough allocation block.
+    }
+  }
+
+  // Code range is full or too fragmented.
+  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+}
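
The sort-and-merge step above can be read in isolation: order the free blocks by address and coalesce each run of adjacent blocks, as GetNextAllocationBlock does. A hedged sketch using standard containers in place of V8's List:

    #include <algorithm>
    #include <vector>

    struct Block { char* start; size_t size; };

    // Sort free blocks by address, then merge each run of adjacent blocks
    // into one larger block.
    std::vector<Block> Coalesce(std::vector<Block> blocks) {
      std::sort(blocks.begin(), blocks.end(),
                [](const Block& a, const Block& b) { return a.start < b.start; });
      std::vector<Block> merged;
      for (size_t i = 0; i < blocks.size();) {
        Block current = blocks[i++];
        while (i < blocks.size() &&
               blocks[i].start == current.start + current.size) {
          current.size += blocks[i++].size;  // absorb the adjacent block
        }
        if (current.size > 0) merged.push_back(current);
      }
      return merged;
    }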
+
+
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+  ASSERT(current_allocation_block_index_ < allocation_list_.length());
+  if (requested > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.  This call may abort through
+    // V8::FatalProcessOutOfMemory if it cannot find a large enough block.
+    GetNextAllocationBlock(requested);
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  *allocated = RoundUp(requested, Page::kPageSize);
+  FreeBlock current = allocation_list_[current_allocation_block_index_];
+  if (*allocated >= current.size - Page::kPageSize) {
+    // Don't leave a small free block, useless for a large object or chunk.
+    *allocated = current.size;
+  }
+  ASSERT(*allocated <= current.size);
+  if (!code_range_->Commit(current.start, *allocated, true)) {
+    *allocated = 0;
+    return NULL;
+  }
+  allocation_list_[current_allocation_block_index_].start += *allocated;
+  allocation_list_[current_allocation_block_index_].size -= *allocated;
+  if (*allocated == current.size) {
+    GetNextAllocationBlock(0);  // This block is used up, get the next one.
+  }
+  return current.start;
+}
+
+
+void CodeRange::FreeRawMemory(void* address, size_t length) {
+  free_list_.Add(FreeBlock(address, length));
+  code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
 // MemoryAllocator
 //
 int MemoryAllocator::capacity_   = 0;
@@ -226,8 +348,12 @@
                                          size_t* allocated,
                                          Executability executable) {
   if (size_ + static_cast<int>(requested) > capacity_) return NULL;
-
-  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
+  void* mem;
+  if (executable == EXECUTABLE && CodeRange::exists()) {
+    mem = CodeRange::AllocateRawMemory(requested, allocated);
+  } else {
+    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
+  }
   int alloced = *allocated;
   size_ += alloced;
   Counters::memory_allocated.Increment(alloced);
@@ -236,7 +362,11 @@
 
 
 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
-  OS::Free(mem, length);
+  if (CodeRange::contains(static_cast<Address>(mem))) {
+    CodeRange::FreeRawMemory(mem, length);
+  } else {
+    OS::Free(mem, length);
+  }
   Counters::memory_allocated.Decrement(length);
   size_ -= length;
   ASSERT(size_ >= 0);
diff --git a/src/spaces.h b/src/spaces.h
index 7170318..76b88ef 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -315,6 +315,72 @@
 
 
 // ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements.  This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4 GB virtual address space.  On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange : public AllStatic {
+ public:
+  // Reserves a range of virtual memory, but does not commit any of it.
+  // Can only be called once, at heap initialization time.
+  // Returns false on failure.
+  static bool Setup(const size_t requested_size);
+
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  static void TearDown();
+
+  static bool exists() { return code_range_ != NULL; }
+  static bool contains(Address address) {
+    if (code_range_ == NULL) return false;
+    Address start = static_cast<Address>(code_range_->address());
+    return start <= address && address < start + code_range_->size();
+  }
+
+  // Allocates a chunk of memory from the large-object portion of
+  // the code range.  On platforms with no separate code range, should
+  // not be called.
+  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
+  static void FreeRawMemory(void* buf, size_t length);
+
+ private:
+  // The reserved range of virtual memory that all code objects are put in.
+  static VirtualMemory* code_range_;
+  // Plain old data class, just a struct plus a constructor.
+  class FreeBlock {
+   public:
+    FreeBlock(Address start_arg, size_t size_arg)
+        : start(start_arg), size(size_arg) {}
+    FreeBlock(void* start_arg, size_t size_arg)
+        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+
+    Address start;
+    size_t size;
+  };
+
+  // Freed blocks of memory are added to the free list.  When the allocation
+  // list is exhausted, the free list is sorted and merged to make the new
+  // allocation list.
+  static List<FreeBlock> free_list_;
+  // Memory is allocated from the free blocks on the allocation list.
+  // The block at current_allocation_block_index_ is the current block.
+  static List<FreeBlock> allocation_list_;
+  static int current_allocation_block_index_;
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
+  static void GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+};
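
A sketch of the intended lifecycle of this class; the real call sites live in heap setup and in MemoryAllocator, so the ordering and sizes below are illustrative only:

    // Illustrative only; sizes are arbitrary.
    void CodeRangeLifecycleSketch() {
      if (!CodeRange::Setup(512 * 1024 * 1024)) return;  // reserve, commit none
      size_t allocated = 0;
      void* chunk = CodeRange::AllocateRawMemory(64 * 1024, &allocated);
      if (chunk != NULL) {
        // ... place code objects in [chunk, chunk + allocated) ...
        CodeRange::FreeRawMemory(chunk, allocated);  // back onto the free list
      }
      CodeRange::TearDown();  // releases the whole reservation
    }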
+
+
+// ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
 // allocator manages chunks for the paged heap spaces (old space and map
 // space).  A paged chunk consists of pages. Pages in a chunk have contiguous
@@ -380,8 +446,9 @@
   // function returns an invalid page pointer (NULL). The caller must check
   // whether the returned page is valid (by calling Page::is_valid()).  It is
   // guaranteed that allocated pages have contiguous addresses.  The actual
-  // number of allocated page is returned in the output parameter
-  // allocated_pages.
+  // number of allocated pages is returned in the output parameter
+  // allocated_pages.  If the PagedSpace owner is executable and there is
+  // a code range, the pages are allocated from the code range.
   static Page* AllocatePages(int requested_pages, int* allocated_pages,
                              PagedSpace* owner);
 
@@ -395,6 +462,9 @@
   // Allocates and frees raw memory of certain size.
   // These are just thin wrappers around OS::Allocate and OS::Free,
   // but keep track of allocated bytes as part of heap.
+  // If the flag is EXECUTABLE and a code range exists, the requested
+  // memory is allocated from the code range.  If a code range exists
+  // and the freed memory is in it, the code range manages the freed memory.
   static void* AllocateRawMemory(const size_t requested,
                                  size_t* allocated,
                                  Executability executable);
diff --git a/src/string.js b/src/string.js
index 263fac5..fbdc307 100644
--- a/src/string.js
+++ b/src/string.js
@@ -62,7 +62,7 @@
 
 // ECMA-262, section 15.5.4.4
 function StringCharAt(pos) {
-  var char_code = %_FastCharCodeAt(this, index);
+  var char_code = %_FastCharCodeAt(this, pos);
   if (!%_IsSmi(char_code)) {
     var subject = ToString(this);
     var index = TO_INTEGER(pos);
@@ -184,6 +184,14 @@
 }
 
 
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned.  It is used when the
+// needle is a string rather than a regexp.  In this case we can't update
+// lastMatchInfo without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, "", "", -1, -1];
+
+
 // ECMA-262, section 15.5.4.11
 function StringReplace(search, replace) {
   var subject = ToString(this);
@@ -224,14 +232,6 @@
 }
 
 
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned.  It is used when the
-// needle is a string rather than a regexp.  In this case we can't update
-// lastMatchArray without erroneously affecting the properties on the global
-// RegExp object.
-var reusableMatchInfo = [2, "", "", -1, -1];
-
-
 // Helper function for regular expressions in String.prototype.replace.
 function StringReplaceRegExp(subject, regexp, replace) {
   replace = ToString(replace);
@@ -370,8 +370,8 @@
 //     'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
 // should be 'abcd' and not 'dddd' (or anything else).
 function StringReplaceRegExpWithFunction(subject, regexp, replace) {
-  var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
-  if (IS_NULL(lastMatchInfo)) return subject;
+  var matchInfo = DoRegExpExec(regexp, subject, 0);
+  if (IS_NULL(matchInfo)) return subject;
 
   var result = new ReplaceResultBuilder(subject);
   // There's at least one match.  If the regexp is global, we have to loop
@@ -382,11 +382,11 @@
   if (regexp.global) {
     var previous = 0;
     do {
-      result.addSpecialSlice(previous, lastMatchInfo[CAPTURE0]);
-      var startOfMatch = lastMatchInfo[CAPTURE0];
-      previous = lastMatchInfo[CAPTURE1];
-      result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
-      // Can't use lastMatchInfo any more from here, since the function could
+      result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+      var startOfMatch = matchInfo[CAPTURE0];
+      previous = matchInfo[CAPTURE1];
+      result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+      // Can't use matchInfo any more from here, since the function could
       // overwrite it.
       // Continue with the next match.
       // Increment previous if we matched an empty string, as per ECMA-262
@@ -401,20 +401,20 @@
 
       // Per ECMA-262 15.10.6.2, if the previous index is greater than the
       // string length, there is no match
-      lastMatchInfo = (previous > subject.length)
+      matchInfo = (previous > subject.length)
           ? null
           : DoRegExpExec(regexp, subject, previous);
-    } while (!IS_NULL(lastMatchInfo));
+    } while (!IS_NULL(matchInfo));
 
     // Tack on the final right substring after the last match, if necessary.
     if (previous < subject.length) {
       result.addSpecialSlice(previous, subject.length);
     }
   } else { // Not a global regexp, no need to loop.
-    result.addSpecialSlice(0, lastMatchInfo[CAPTURE0]);
-    var endOfMatch = lastMatchInfo[CAPTURE1];
-    result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
-    // Can't use lastMatchInfo any more from here, since the function could
+    result.addSpecialSlice(0, matchInfo[CAPTURE0]);
+    var endOfMatch = matchInfo[CAPTURE1];
+    result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+    // Can't use matchInfo any more from here, since the function could
     // overwrite it.
     result.addSpecialSlice(endOfMatch, subject.length);
   }
@@ -424,20 +424,20 @@
 
 
 // Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
+function ApplyReplacementFunction(replace, matchInfo, subject) {
   // Compute the parameter list consisting of the match, captures, index,
   // and subject for the replace function invocation.
-  var index = lastMatchInfo[CAPTURE0];
+  var index = matchInfo[CAPTURE0];
   // The number of captures plus one for the match.
-  var m = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
   if (m == 1) {
-    var s = CaptureString(subject, lastMatchInfo, 0);
+    var s = CaptureString(subject, matchInfo, 0);
     // Don't call directly to avoid exposing the built-in global object.
     return replace.call(null, s, index, subject);
   }
   var parameters = $Array(m + 2);
   for (var j = 0; j < m; j++) {
-    parameters[j] = CaptureString(subject, lastMatchInfo, j);
+    parameters[j] = CaptureString(subject, matchInfo, j);
   }
   parameters[j] = index;
   parameters[j + 1] = subject;
@@ -539,14 +539,14 @@
       return result;
     }
 
-    var lastMatchInfo = splitMatch(separator, subject, currentIndex, startIndex);
+    var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
 
-    if (IS_NULL(lastMatchInfo)) {
+    if (IS_NULL(matchInfo)) {
       result[result.length] = subject.slice(currentIndex, length);
       return result;
     }
 
-    var endIndex = lastMatchInfo[CAPTURE1];
+    var endIndex = matchInfo[CAPTURE1];
 
     // We ignore a zero-length match at the currentIndex.
     if (startIndex === endIndex && endIndex === currentIndex) {
@@ -554,12 +554,12 @@
       continue;
     }
 
-    result[result.length] = SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
+    result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
     if (result.length === limit) return result;
 
-    for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
-      var start = lastMatchInfo[CAPTURE(i)];
-      var end = lastMatchInfo[CAPTURE(i + 1)];
+    for (var i = 2; i < NUMBER_OF_CAPTURES(matchInfo); i += 2) {
+      var start = matchInfo[CAPTURE(i)];
+      var end = matchInfo[CAPTURE(i + 1)];
       if (start != -1 && end != -1) {
         result[result.length] = SubString(subject, start, end);
       } else {
@@ -574,16 +574,16 @@
 
 
 // ECMA-262 section 15.5.4.14
-// Helper function used by split.  This version returns the lastMatchInfo
+// Helper function used by split.  This version returns the matchInfo
 // instead of allocating a new array with basically the same information.
 function splitMatch(separator, subject, current_index, start_index) {
   if (IS_REGEXP(separator)) {
-    var lastMatchInfo = DoRegExpExec(separator, subject, start_index);
-    if (lastMatchInfo == null) return null;
+    var matchInfo = DoRegExpExec(separator, subject, start_index);
+    if (matchInfo == null) return null;
     // Section 15.5.4.14 paragraph two says that we do not allow zero length
     // matches at the end of the string.
-    if (lastMatchInfo[CAPTURE0] === subject.length) return null;
-    return lastMatchInfo;
+    if (matchInfo[CAPTURE0] === subject.length) return null;
+    return matchInfo;
   }
 
   var separatorIndex = subject.indexOf(separator, start_index);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 2906c22..e10dc61 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -735,28 +735,17 @@
 
 
 Object* LoadCallbackProperty(Arguments args) {
-  Handle<JSObject> recv = args.at<JSObject>(0);
-  Handle<JSObject> holder = args.at<JSObject>(1);
   AccessorInfo* callback = AccessorInfo::cast(args[2]);
-  Handle<Object> data = args.at<Object>(3);
   Address getter_address = v8::ToCData<Address>(callback->getter());
   v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
   ASSERT(fun != NULL);
-  Handle<String> name = args.at<String>(4);
-  // NOTE: If we can align the structure of an AccessorInfo with the
-  // locations of the arguments to this function maybe we don't have
-  // to explicitly create the structure but can just pass a pointer
-  // into the stack.
-  LOG(ApiNamedPropertyAccess("load", *recv, *name));
-  v8::AccessorInfo info(v8::Utils::ToLocal(recv),
-                        v8::Utils::ToLocal(data),
-                        v8::Utils::ToLocal(holder));
+  v8::AccessorInfo info(args.arguments());
   HandleScope scope;
   v8::Handle<v8::Value> result;
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-    result = fun(v8::Utils::ToLocal(name), info);
+    result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION();
   if (result.IsEmpty()) return Heap::undefined_value();
@@ -765,7 +754,7 @@
 
 
 Object* StoreCallbackProperty(Arguments args) {
-  Handle<JSObject> recv = args.at<JSObject>(0);
+  JSObject* recv = JSObject::cast(args[0]);
   AccessorInfo* callback = AccessorInfo::cast(args[1]);
   Address setter_address = v8::ToCData<Address>(callback->setter());
   v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
@@ -773,11 +762,9 @@
   Handle<String> name = args.at<String>(2);
   Handle<Object> value = args.at<Object>(3);
   HandleScope scope;
-  Handle<Object> data(callback->data());
-  LOG(ApiNamedPropertyAccess("store", *recv, *name));
-  v8::AccessorInfo info(v8::Utils::ToLocal(recv),
-                        v8::Utils::ToLocal(data),
-                        v8::Utils::ToLocal(recv));
+  LOG(ApiNamedPropertyAccess("store", recv, *name));
+  CustomArguments custom_args(callback->data(), recv, recv);
+  v8::AccessorInfo info(custom_args.end());
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
@@ -795,11 +782,11 @@
  * provide any value for the given name.
  */
 Object* LoadPropertyWithInterceptorOnly(Arguments args) {
-  Handle<JSObject> receiver_handle = args.at<JSObject>(0);
-  Handle<JSObject> holder_handle = args.at<JSObject>(1);
+  JSObject* receiver_handle = JSObject::cast(args[0]);
+  JSObject* holder_handle = JSObject::cast(args[1]);
   Handle<String> name_handle = args.at<String>(2);
   Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
-  Handle<Object> data_handle = args.at<Object>(4);
+  Object* data_handle = args[4];
 
   Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
   v8::NamedPropertyGetter getter =
@@ -808,9 +795,8 @@
 
   {
     // Use the interceptor getter.
-    v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(holder_handle));
+    CustomArguments args(data_handle, receiver_handle, holder_handle);
+    v8::AccessorInfo info(args.end());
     HandleScope scope;
     v8::Handle<v8::Value> r;
     {
@@ -861,9 +847,8 @@
 
   {
     // Use the interceptor getter.
-    v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
-                          v8::Utils::ToLocal(data_handle),
-                          v8::Utils::ToLocal(holder_handle));
+    CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
+    v8::AccessorInfo info(args.end());
     HandleScope scope;
     v8::Handle<v8::Value> r;
     {
diff --git a/src/top.cc b/src/top.cc
index 039c292..aa7788e 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -98,7 +98,8 @@
   thread_local_.stack_is_cooked_ = false;
   thread_local_.try_catch_handler_ = NULL;
   thread_local_.context_ = NULL;
-  thread_local_.thread_id_ = ThreadManager::kInvalidId;
+  int id = ThreadManager::CurrentId();
+  thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
   thread_local_.external_caught_exception_ = false;
   thread_local_.failed_access_check_callback_ = NULL;
   clear_pending_exception();
diff --git a/src/top.h b/src/top.h
index 5b3d6a0..ae94f08 100644
--- a/src/top.h
+++ b/src/top.h
@@ -78,6 +78,12 @@
 
   // Call back function to report unsafe JS accesses.
   v8::FailedAccessCheckCallback failed_access_check_callback_;
+
+  void Free() {
+    ASSERT(!has_pending_message_);
+    ASSERT(!external_caught_exception_);
+    ASSERT(try_catch_handler_ == NULL);
+  }
 };
 
 #define TOP_ADDRESS_LIST(C) \
@@ -316,6 +322,7 @@
   static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
   static char* ArchiveThread(char* to);
   static char* RestoreThread(char* from);
+  static void FreeThreadResources() { thread_local_.Free(); }
 
   static const char* kStackOverflowMessage;
 
diff --git a/src/uri.js b/src/uri.js
index 0dfe765..5af71b6 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -30,6 +30,11 @@
 
 // Expect $String = global.String;
 
+// Lazily initialized.
+var hexCharArray = 0;
+var hexCharCodeArray = 0;
+
+
 function URIAddEncodedOctetToBuffer(octet, result, index) {
   result[index++] = 37; // Char code of '%'.
   result[index++] = hexCharCodeArray[octet >> 4];
@@ -320,11 +325,6 @@
 }
 
 
-// Lazily initialized.
-var hexCharArray = 0;
-var hexCharCodeArray = 0;
-
-
 function HexValueOf(c) {
   var code = c.charCodeAt(0);
   
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
index 5514f40..23a4d9f 100644
--- a/src/usage-analyzer.cc
+++ b/src/usage-analyzer.cc
@@ -44,45 +44,12 @@
  public:
   static bool Traverse(AstNode* node);
 
-  void VisitBlock(Block* node);
-  void VisitDeclaration(Declaration* node);
-  void VisitExpressionStatement(ExpressionStatement* node);
-  void VisitEmptyStatement(EmptyStatement* node);
-  void VisitIfStatement(IfStatement* node);
-  void VisitContinueStatement(ContinueStatement* node);
-  void VisitBreakStatement(BreakStatement* node);
-  void VisitReturnStatement(ReturnStatement* node);
-  void VisitWithEnterStatement(WithEnterStatement* node);
-  void VisitWithExitStatement(WithExitStatement* node);
-  void VisitSwitchStatement(SwitchStatement* node);
-  void VisitLoopStatement(LoopStatement* node);
-  void VisitForInStatement(ForInStatement* node);
-  void VisitTryCatch(TryCatch* node);
-  void VisitTryFinally(TryFinally* node);
-  void VisitDebuggerStatement(DebuggerStatement* node);
-  void VisitFunctionLiteral(FunctionLiteral* node);
-  void VisitFunctionBoilerplateLiteral(FunctionBoilerplateLiteral* node);
-  void VisitConditional(Conditional* node);
-  void VisitSlot(Slot* node);
-  void VisitVariable(Variable* node);
-  void VisitVariableProxy(VariableProxy* node);
-  void VisitLiteral(Literal* node);
-  void VisitRegExpLiteral(RegExpLiteral* node);
-  void VisitObjectLiteral(ObjectLiteral* node);
-  void VisitArrayLiteral(ArrayLiteral* node);
-  void VisitCatchExtensionObject(CatchExtensionObject* node);
-  void VisitAssignment(Assignment* node);
-  void VisitThrow(Throw* node);
-  void VisitProperty(Property* node);
-  void VisitCall(Call* node);
-  void VisitCallEval(CallEval* node);
-  void VisitCallNew(CallNew* node);
-  void VisitCallRuntime(CallRuntime* node);
-  void VisitUnaryOperation(UnaryOperation* node);
-  void VisitCountOperation(CountOperation* node);
-  void VisitBinaryOperation(BinaryOperation* node);
-  void VisitCompareOperation(CompareOperation* node);
-  void VisitThisFunction(ThisFunction* node);
+  // AST node visit functions.
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  void VisitVariable(Variable* var);
 
  private:
   int weight_;
@@ -329,13 +296,9 @@
 }
 
 
-void UsageComputer::VisitCallEval(CallEval* node) {
-  VisitCall(node);
-}
-
-
 void UsageComputer::VisitCallNew(CallNew* node) {
-  VisitCall(node);
+  Read(node->expression());
+  ReadList(node->arguments());
 }
 
 
diff --git a/src/utils.cc b/src/utils.cc
index d56d279..3c684b8 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -239,7 +239,7 @@
   FILE* f = OS::FOpen(filename, "wb");
   if (f == NULL) {
     if (verbose) {
-      OS::PrintError("Cannot open file %s for reading.\n", filename);
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
     }
     return 0;
   }
diff --git a/src/v8.cc b/src/v8.cc
index a204158..3f8e6cd 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -71,6 +71,14 @@
   ::assembler::arm::Simulator::Initialize();
 #endif
 
+  { // NOLINT
+    // Ensure that the thread has a valid stack guard.  The v8::Locker object
+    // will ensure this too, but we don't have to use lockers if we are only
+    // using one thread.
+    ExecutionAccess lock;
+    StackGuard::InitThread(lock);
+  }
+
   // Setup the object heap
   ASSERT(!Heap::HasBeenSetup());
   if (!Heap::Setup(create_heap_objects)) {
@@ -161,10 +169,10 @@
 }
 
 
-bool V8::IdleNotification(bool is_high_priority) {
-  if (!FLAG_use_idle_notification) return false;
-  // Ignore high priority instances of V8.
-  if (is_high_priority) return false;
+bool V8::IdleNotification() {
+  // Returning true tells the caller that there is no need to call
+  // IdleNotification again.
+  if (!FLAG_use_idle_notification) return true;
 
   // Tell the heap that it may want to adjust.
   return Heap::IdleNotification();
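
Under the new contract a true return means "no need to call again", so an embedder's idle hook reduces to a loop. A hedged sketch, assuming the public v8::V8::IdleNotification wrapper mirrors this signature:

    // Illustrative embedder idle hook: keep notifying while V8 reports that
    // more idle work remains; true means "no need to call again".
    void OnEmbedderIdle() {
      while (!v8::V8::IdleNotification()) {
        // V8 used the idle round for heap adjustment; try again.
      }
    }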
diff --git a/src/v8.h b/src/v8.h
index 7786d66..106ae61 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -95,7 +95,7 @@
   static Smi* RandomPositiveSmi();
 
   // Idle notification directly from the API.
-  static bool IdleNotification(bool is_high_priority);
+  static bool IdleNotification();
 
  private:
   // True if engine is currently running
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 3022a7e..80a7cd9 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -56,10 +56,20 @@
   if (!internal::ThreadManager::IsLockedByCurrentThread()) {
     internal::ThreadManager::Lock();
     has_lock_ = true;
+    // Make sure that V8 is initialized.  Archiving of threads interferes
+    // with deserialization by adding additional root pointers, so we must
+    // initialize here, before anyone can call ~Locker() or Unlocker().
+    if (!internal::V8::IsRunning()) {
+      V8::Initialize();
+    }
     // This may be a locker within an unlocker in which case we have to
     // get the saved state for this thread and restore it.
     if (internal::ThreadManager::RestoreThread()) {
       top_level_ = false;
+    } else {
+      internal::ExecutionAccess access;
+      internal::StackGuard::ClearThread(access);
+      internal::StackGuard::InitThread(access);
     }
   }
   ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
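
Because the Locker now initializes V8 and the thread's stack guard lazily, a worker thread needs nothing beyond the standard entry sequence. An illustrative use of the existing public API:

    // A worker thread entering V8: the Locker takes the lock and, on first
    // use anywhere in the process, initializes V8 and this thread's stack
    // guard before any JavaScript runs.
    void RunOnWorkerThread(v8::Handle<v8::Context> context) {
      v8::Locker locker;
      v8::HandleScope handle_scope;
      v8::Context::Scope context_scope(context);
      // ... compile and run scripts ...
    }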
@@ -77,7 +87,9 @@
 Locker::~Locker() {
   ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
   if (has_lock_) {
-    if (!top_level_) {
+    if (top_level_) {
+      internal::ThreadManager::FreeThreadResources();
+    } else {
       internal::ThreadManager::ArchiveThread();
     }
     internal::ThreadManager::Unlock();
@@ -139,11 +151,14 @@
   ThreadState* state =
       reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
   if (state == NULL) {
+    // This is a new thread.
+    StackGuard::InitThread(access);
     return false;
   }
   char* from = state->data();
   from = HandleScopeImplementer::RestoreThread(from);
   from = Top::RestoreThread(from);
+  from = Relocatable::RestoreState(from);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   from = Debug::RestoreDebug(from);
 #endif
@@ -183,7 +198,8 @@
 #endif
                      StackGuard::ArchiveSpacePerThread() +
                     RegExpStack::ArchiveSpacePerThread() +
-                   Bootstrapper::ArchiveSpacePerThread();
+                    Bootstrapper::ArchiveSpacePerThread() +
+                    Relocatable::ArchiveSpacePerThread();
 }
 
 
@@ -273,6 +289,7 @@
   // in ThreadManager::Iterate(ObjectVisitor*).
   to = HandleScopeImplementer::ArchiveThread(to);
   to = Top::ArchiveThread(to);
+  to = Relocatable::ArchiveState(to);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   to = Debug::ArchiveDebug(to);
 #endif
@@ -284,6 +301,18 @@
 }
 
 
+void ThreadManager::FreeThreadResources() {
+  HandleScopeImplementer::FreeThreadResources();
+  Top::FreeThreadResources();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::FreeThreadResources();
+#endif
+  StackGuard::FreeThreadResources();
+  RegExpStack::FreeThreadResources();
+  Bootstrapper::FreeThreadResources();
+}
+
+
 bool ThreadManager::IsArchived() {
   return Thread::HasThreadLocal(thread_state_key);
 }
@@ -297,6 +326,7 @@
     char* data = state->data();
     data = HandleScopeImplementer::Iterate(v, data);
     data = Top::Iterate(v, data);
+    data = Relocatable::Iterate(v, data);
   }
 }
 
diff --git a/src/v8threads.h b/src/v8threads.h
index f808e54..0684053 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -86,6 +86,7 @@
 
   static void ArchiveThread();
   static bool RestoreThread();
+  static void FreeThreadResources();
   static bool IsArchived();
 
   static void Iterate(ObjectVisitor* v);
diff --git a/src/variables.h b/src/variables.h
index c2adb23..ca78b5f 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -171,7 +171,7 @@
   UseCount* var_uses()  { return &var_uses_; }
   UseCount* obj_uses()  { return &obj_uses_; }
 
-  bool IsVariable(Handle<String> n) {
+  bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
 
@@ -185,6 +185,12 @@
   bool is_this() const { return kind_ == THIS; }
   bool is_arguments() const { return kind_ == ARGUMENTS; }
 
+  // True if the variable is named eval and not known to be shadowed.
+  bool is_possibly_eval() const {
+    return IsVariable(Factory::eval_symbol()) &&
+        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
+  }
+
   Variable* local_if_not_shadowed() const {
     ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
     return local_if_not_shadowed_;
diff --git a/src/version.cc b/src/version.cc
index 7c6910b..28815cc 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      13
-#define PATCH_LEVEL       5
+#define BUILD_NUMBER      14
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f51a3ea..899a17c 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -70,6 +70,20 @@
 }
 
 
+void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  RecordRelocInfo(rmode);
+  int current = code_targets_.length();
+  if (current > 0 && code_targets_.last().is_identical_to(target)) {
+    // Optimization if we keep jumping to the same code target.
+    emitl(current - 1);
+  } else {
+    code_targets_.Add(target);
+    emitl(current);
+  }
+}
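
The fast path above reuses the previous index when consecutive emits name the same target. A minimal sketch of that dedup with a plain vector, and int standing in for Handle<Code>:

    #include <vector>

    // Emit an index for 'target', reusing the last slot when the same target
    // repeats, as emit_code_target does above.
    int EmitTargetIndex(std::vector<int>* targets, int target) {
      if (!targets->empty() && targets->back() == target) {
        return static_cast<int>(targets->size()) - 1;  // reuse previous slot
      }
      targets->push_back(target);
      return static_cast<int>(targets->size()) - 1;
    }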
+
+
 void Assembler::emit_rex_64(Register reg, Register rm_reg) {
   emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
 }
@@ -162,15 +176,18 @@
 
 
 Address Assembler::target_address_at(Address pc) {
-  return Memory::Address_at(pc);
+  return Memory::int32_at(pc) + pc + 4;
 }
 
 
 void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::Address_at(pc) = target;
-  CPU::FlushICache(pc, sizeof(intptr_t));
+  Memory::int32_at(pc) = target - pc - 4;
+  CPU::FlushICache(pc, sizeof(int32_t));
 }
 
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+  return code_targets_[Memory::int32_at(pc)];
+}
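
The new target_address_at/set_target_address_at pair uses the usual rel32 convention: the stored 32-bit value is relative to the address just past the 4-byte field. A self-contained check of that arithmetic (assumed semantics, not V8 code):

    #include <cassert>
    #include <cstdint>

    // rel32 encoding: the stored value is target minus the address just past
    // the 4-byte displacement field (pc + 4).
    int32_t EncodeRel32(uintptr_t pc, uintptr_t target) {
      return static_cast<int32_t>(target - pc - 4);
    }

    uintptr_t DecodeRel32(uintptr_t pc, int32_t disp) {
      return pc + 4 + disp;
    }

    int main() {
      uintptr_t pc = 0x1000, target = 0x2468;
      assert(DecodeRel32(pc, EncodeRel32(pc, target)) == target);
      return 0;
    }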
 
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
@@ -179,15 +196,24 @@
 void RelocInfo::apply(intptr_t delta) {
   if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
-    intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
-    *p += delta;  // relocate entry
+    Memory::Address_at(pc_) += delta;
+  } else if (IsCodeTarget(rmode_)) {
+    Memory::int32_at(pc_) -= delta;
+  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+    // Special handling of js_return when a break point is set (call
+    // instruction has been inserted).
+    Memory::int32_at(pc_ + 1) -= delta;  // relocate entry
   }
 }
 
 
 Address RelocInfo::target_address() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return Assembler::target_address_at(pc_);
+  if (IsCodeTarget(rmode_)) {
+    return Assembler::target_address_at(pc_);
+  } else {
+    return Memory::Address_at(pc_);
+  }
 }
 
 
@@ -199,13 +225,27 @@
 
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  Assembler::set_target_address_at(pc_, target);
+  if (IsCodeTarget(rmode_)) {
+    Assembler::set_target_address_at(pc_, target);
+  } else {
+    Memory::Address_at(pc_) = target;
+  }
 }
 
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return *reinterpret_cast<Object**>(pc_);
+  return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  if (rmode_ == EMBEDDED_OBJECT) {
+    return Memory::Object_Handle_at(pc_);
+  } else {
+    return origin->code_target_object_handle_at(pc_);
+  }
 }
 
 
@@ -240,16 +280,15 @@
 
 Address RelocInfo::call_address() {
   ASSERT(IsCallInstruction());
-  return Assembler::target_address_at(
-      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+  return Memory::Address_at(
+      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
   ASSERT(IsCallInstruction());
-  Assembler::set_target_address_at(
-      pc_ + Assembler::kPatchReturnSequenceAddressOffset,
-      target);
+  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
+      target;
 }
 
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index b4204a9..cf79a43 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -264,7 +264,8 @@
 
 byte* Assembler::spare_buffer_ = NULL;
 
-Assembler::Assembler(void* buffer, int buffer_size) {
+Assembler::Assembler(void* buffer, int buffer_size)
+    : code_targets_(100) {
   if (buffer == NULL) {
     // do our own buffer management
     if (buffer_size <= kMinimalBufferSize) {
@@ -762,6 +763,15 @@
 }
 
 
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp
+  emit(0xE8);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::call(Register adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1062,6 +1072,19 @@
 }
 
 
+void Assembler::j(Condition cc,
+                  Handle<Code> target,
+                  RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint4(cc));
+  // 0000 1111 1000 tttn #32-bit disp
+  emit(0x0F);
+  emit(0x80 | cc);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1093,6 +1116,15 @@
 }
 
 
+void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1001 #32-bit disp
+  emit(0xE9);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2387,7 +2419,8 @@
 }
 
 
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
-
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+                                  1 << RelocInfo::INTERNAL_REFERENCE |
+                                  1 << RelocInfo::JS_RETURN;
 
 } }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 697dd54..e17a55d 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -440,18 +440,26 @@
   // Assembler functions are invoked in between GetCode() calls.
   void GetCode(CodeDesc* desc);
 
-  // Read/Modify the code target in the branch/call instruction at pc.
-  // On the x64 architecture, the address is absolute, not relative.
+  // Read/Modify the code target in the relative branch/call instruction at pc.
+  // On the x64 architecture, we use relative jumps with a 32-bit displacement
+  // to jump to other Code objects in the Code space in the heap.
+  // Jumps to C functions are done indirectly through a 64-bit register holding
+  // the absolute address of the target.
+  // These functions convert between absolute Addresses of Code objects and
+  // the relative displacements stored in the code.
   static inline Address target_address_at(Address pc);
   static inline void set_target_address_at(Address pc, Address target);
-
+  inline Handle<Object> code_target_object_handle_at(Address pc);
   // Distance between the address of the code target in the call instruction
-  // and the return address.  Checked in the debug build.
-  static const int kCallTargetAddressOffset = 3 + kPointerSize;
-  // Distance between start of patched return sequence and the emitted address
-  // to jump to (movq = REX.W 0xB8+r.).
-  static const int kPatchReturnSequenceAddressOffset = 2;
-
+  // and the return address pushed on the stack.
+  static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
+  // Distance between the start of the JS return sequence and where the
+  // 32-bit displacement of a near call would be, relative to the pushed
+  // return address.  TODO: Use return sequence length instead.
+  // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
+  static const int kPatchReturnSequenceAddressOffset = 13 - 4;
+  // TODO(X64): Rename this, removing the "Real", after changing the above.
+  static const int kRealPatchReturnSequenceAddressOffset = 2;
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -687,6 +695,10 @@
     immediate_arithmetic_op(0x4, dst, src);
   }
 
+  void andl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x4, dst, src);
+  }
+
   void decq(Register dst);
   void decq(const Operand& dst);
   void decl(Register dst);
@@ -919,6 +931,7 @@
   // Calls
   // Call near relative 32-bit displacement, relative to next instruction.
   void call(Label* L);
+  void call(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Call near absolute indirect, address in register
   void call(Register adr);
@@ -928,7 +941,9 @@
 
   // Jumps
   // Jump short or near relative.
+  // Use a 32-bit signed displacement.
   void jmp(Label* L);  // unconditional jump to L
+  void jmp(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Jump near absolute indirect (r64)
   void jmp(Register adr);
@@ -938,6 +953,7 @@
 
   // Conditional jumps
   void j(Condition cc, Label* L);
+  void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
   // Floating-point operations
   void fld(int i);
@@ -1043,14 +1059,6 @@
   void RecordStatementPosition(int pos);
   void WriteRecordedPositions();
 
-  // Writes a doubleword of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  // void dd(uint32_t data);
-
-  // Writes a quadword of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  // void dd(uint64_t data, RelocInfo::Mode reloc_info);
-
   int pc_offset() const  { return pc_ - buffer_; }
   int current_statement_position() const { return current_statement_position_; }
   int current_position() const  { return current_position_; }
@@ -1092,9 +1100,9 @@
 
   void emit(byte x) { *pc_++ = x; }
   inline void emitl(uint32_t x);
-  inline void emit(Handle<Object> handle);
   inline void emitq(uint64_t x, RelocInfo::Mode rmode);
   inline void emitw(uint16_t x);
+  inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
   void emit(Immediate x) { emitl(x.value_); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
@@ -1272,6 +1280,7 @@
   byte* pc_;  // the program counter; moves forward
   RelocInfoWriter reloc_info_writer;
 
+  List< Handle<Code> > code_targets_;
   // push-pop elimination
   byte* last_pc_;
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index d399a88..35eddc4 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -41,10 +41,10 @@
   __ movq(Operand(kScratchRegister, 0), rdi);
 
   // The actual argument count has already been loaded into register
-  // rax, but JumpToBuiltin expects rax to contain the number of
+  // rax, but JumpToRuntime expects rax to contain the number of
   // arguments including the receiver.
   __ incq(rax);
-  __ JumpToBuiltin(ExternalReference(id), 1);
+  __ JumpToRuntime(ExternalReference(id), 1);
 }
 
 
@@ -452,8 +452,391 @@
 }
 
 
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+  __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ movq(result,
+          Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of elements to preallocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero, an
+// elements backing store is allocated with this size and filled with the hole
+// value. Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity >= 0);
+
+  // Load the initial map from the array function.
+  __ movq(scratch1, FieldOperand(array_function,
+                                 JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+          Factory::empty_fixed_array());
+  // Field JSArray::kElementsOffset is initialized later.
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+  // If no storage is requested for the elements array just set the empty
+  // fixed array.
+  if (initial_capacity == 0) {
+    __ Move(FieldOperand(result, JSArray::kElementsOffset),
+            Factory::empty_fixed_array());
+    return;
+  }
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is not
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array
+  // scratch2: start of next object
+  __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+          Factory::fixed_array_map());
+  __ movq(FieldOperand(scratch1, Array::kLengthOffset),
+          Immediate(initial_capacity));
+
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+  static const int kLoopUnfoldLimit = 4;
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  __ Move(scratch3, Factory::the_hole_value());
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    // Use a scratch register here to have only one reloc info when unfolding
+    // the loop.
+    for (int i = 0; i < initial_capacity; i++) {
+      __ movq(FieldOperand(scratch1,
+                           FixedArray::kHeaderSize + i * kPointerSize),
+              scratch3);
+    }
+  } else {
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(scratch1, 0), scratch3);
+    __ addq(scratch1, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(scratch1, scratch2);
+    __ j(below, &loop);
+  }
+}
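
The unfold-or-loop choice above is a standard small-count specialization; at code-generation time each "unrolled" iteration becomes a separate store instruction. In C++ terms the generated code behaves roughly like this sketch:

    // Sketch only; kUnfoldLimit mirrors kLoopUnfoldLimit above.
    static const int kUnfoldLimit = 4;

    void FillWithHole(void** slots, int count, void* hole) {
      if (count <= kUnfoldLimit) {
        for (int i = 0; i < count; i++) slots[i] = hole;  // "unrolled" stores
      } else {
        void** end = slots + count;
        while (slots < end) *slots++ = hole;  // the emitted fill loop
      }
    }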
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole value; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array,
+                            Register elements_array_end,
+                            Register scratch,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ movq(elements_array,
+          FieldOperand(array_function,
+                       JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,  // array_size is a smi.
+                        array_size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array: initial map
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+  __ Move(elements_array, Factory::empty_fixed_array());
+  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+  // Field JSArray::kElementsOffset is initialized later.
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ lea(elements_array, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+  // Initialize the fixed array. FixedArray length is not stored as a smi.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  ASSERT(kSmiTag == 0);
+  __ SmiToInteger64(array_size, array_size);
+  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+          Factory::fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ movq(FieldOperand(elements_array, Array::kLengthOffset),
+          Immediate(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+  // same.
+  __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  __ bind(&fill_array);
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ Move(scratch, Factory::the_hole_value());
+    __ lea(elements_array, Operand(elements_array,
+                                   FixedArray::kHeaderSize - kHeapObjectTag));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(elements_array, 0), scratch);
+    __ addq(elements_array, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(elements_array, elements_array_end);
+    __ j(below, &loop);
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code, the runtime is called.
+// This function assumes the following state:
+//   rdi: constructor (built-in Array function)
+//   rax: argc
+//   rsp[0]: return address
+//   rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code, so there is no need to
+// differentiate between a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments.
+  __ testq(rax, rax);
+  __ j(not_zero, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       rdi,
+                       rbx,
+                       rcx,
+                       rdx,
+                       r8,
+                       kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ cmpq(rax, Immediate(1));
+  __ j(not_equal, &argc_two_or_more);
+  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
+  Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
+  __ j(not_positive_smi, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  __ JumpIfSmiGreaterEqualsConstant(rdx,
+                                    JSObject::kInitialMaxFastElementArray,
+                                    call_generic_code);
+
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0]: return address
+  // rsp[8]: argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(2 * kPointerSize);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ movq(rdx, rax);
+  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+
+  // rax: argc
+  // rbx: JSArray
+  // rcx: elements_array
+  // r8: elements_array_end (untagged)
+  // rsp[0]: return address
+  // rsp[8]: last argument
+
+  // Location of the last argument.
+  __ lea(r9, Operand(rsp, kPointerSize));
+
+  // Location of the first array element (parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in rcx).
+  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // rax: argc
+  // rbx: JSArray
+  // rdx: location of the first array element
+  // r9: location of the last argument
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  Label loop, entry;
+  __ movq(rcx, rax);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+  __ movq(Operand(rdx, 0), kScratchRegister);
+  __ addq(rdx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ decq(rcx);
+  __ j(greater_equal, &loop);
+
+  // Remove caller arguments from the stack and return.
+  // rax: argc
+  // rbx: JSArray
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+  __ movq(rax, rbx);
+  __ ret(0);
+}
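
Editor's note: ArrayNativeCode dispatches on the three argument shapes of the Array builtin. A hedged C++ model of that dispatch follows (helper names are invented here; each bail-out corresponds to a jump to call_generic_code):

    // Hypothetical model of the dispatch in ArrayNativeCode.
    JSArray* ArrayNativeCodeModel(int argc, Value* args[]) {
      if (argc == 0) {
        // 'Array()': empty array with kPreallocatedArrayElements capacity.
        return AllocateEmptyArray();
      }
      if (argc == 1) {
        // 'Array(n)': n must be a non-negative smi below the fast limit.
        if (!IsPositiveSmi(args[0]) ||
            SmiValue(args[0]) >= JSObject::kInitialMaxFastElementArray) {
          return CallGenericCode();
        }
        return AllocateArrayOfLength(SmiValue(args[0]));  // hole-filled
      }
      // 'Array(a, b, ...)': copy the arguments into the elements array.
      return AllocateArrayFromArguments(argc, args);
    }
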
+
+
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  // Just jump to the generic array code.
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both a NULL pointer and a Smi.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = __ CheckNotSmi(rbx);
+    __ Assert(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
   Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
   Handle<Code> array_code(code);
   __ Jump(array_code, RelocInfo::CODE_TARGET);
@@ -461,7 +844,36 @@
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  // Just jump to the generic construct code.
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rdi : constructor
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, rbx);
+    __ cmpq(rdi, rbx);
+    __ Assert(equal, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // The smi check below catches both a NULL pointer and a Smi.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = __ CheckNotSmi(rbx);
+    __ Assert(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Assert(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
   Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
   Handle<Code> generic_construct_stub(code);
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -529,6 +941,7 @@
     // rdi: constructor
     __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi
+    ASSERT(kSmiTag == 0);
     __ JumpIfSmi(rax, &rt_call);
     // rdi: constructor
     // rax: initial map (if proven valid below)
@@ -547,12 +960,12 @@
     __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
     __ shl(rdi, Immediate(kPointerSizeLog2));
     // rdi: size of new object
-    __ AllocateObjectInNewSpace(rdi,
-                                rbx,
-                                rdi,
-                                no_reg,
-                                &rt_call,
-                                NO_ALLOCATION_FLAGS);
+    __ AllocateInNewSpace(rdi,
+                          rbx,
+                          rdi,
+                          no_reg,
+                          &rt_call,
+                          NO_ALLOCATION_FLAGS);
     // Allocated the JSObject, now initialize the fields.
     // rax: initial map
     // rbx: JSObject (not HeapObject tagged - the actual address).
@@ -607,14 +1020,14 @@
     // rbx: JSObject
     // rdi: start of next object (will be start of FixedArray)
     // rdx: number of elements in properties array
-    __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
-                                times_pointer_size,
-                                rdx,
-                                rdi,
-                                rax,
-                                no_reg,
-                                &undo_allocation,
-                                RESULT_CONTAINS_TOP);
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          rdx,
+                          rdi,
+                          rax,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
     // Initialize the FixedArray.
     // rbx: JSObject
diff --git a/src/x64/cfg-x64.cc b/src/x64/cfg-x64.cc
deleted file mode 100644
index b755f49..0000000
--- a/src/x64/cfg-x64.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cfg.h"
-#include "codegen-inl.h"
-#include "codegen-x64.h"
-#include "debug.h"
-#include "macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void InstructionBlock::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  {
-    Comment cmt(masm, "[ InstructionBlock");
-    for (int i = 0, len = instructions_.length(); i < len; i++) {
-      // If the location of the current instruction is a temp, then the
-      // instruction cannot be in tail position in the block.  Allocate the
-      // temp based on peeking ahead to the next instruction.
-      Instruction* instr = instructions_[i];
-      Location* loc = instr->location();
-      if (loc->is_temporary()) {
-        instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
-      }
-      instructions_[i]->Compile(masm);
-    }
-  }
-  successor_->Compile(masm);
-}
-
-
-void EntryNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Label deferred_enter, deferred_exit;
-  {
-    Comment cmnt(masm, "[ EntryNode");
-    __ push(rbp);
-    __ movq(rbp, rsp);
-    __ push(rsi);
-    __ push(rdi);
-    int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
-    if (count > 0) {
-      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-      for (int i = 0; i < count; i++) {
-        __ push(kScratchRegister);
-      }
-    }
-    if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
-    }
-    if (FLAG_check_stack) {
-      ExternalReference stack_limit =
-          ExternalReference::address_of_stack_guard_limit();
-      __ movq(kScratchRegister, stack_limit);
-      __ cmpq(rsp, Operand(kScratchRegister, 0));
-      __ j(below, &deferred_enter);
-      __ bind(&deferred_exit);
-    }
-  }
-  successor_->Compile(masm);
-  if (FLAG_check_stack) {
-    Comment cmnt(masm, "[ Deferred Stack Check");
-    __ bind(&deferred_enter);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-    __ jmp(&deferred_exit);
-  }
-}
-
-
-void ExitNode::Compile(MacroAssembler* masm) {
-  ASSERT(!is_marked());
-  is_marked_ = true;
-  Comment cmnt(masm, "[ ExitNode");
-  if (FLAG_trace) {
-    __ push(rax);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
-  __ RecordJSReturn();
-  __ movq(rsp, rbp);
-  __ pop(rbp);
-  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-  __ ret((count + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Add padding that will be overwritten by a debugger breakpoint.
-  // "movq rsp, rbp; pop rbp" has length 4.  "ret k" has length 3.
-  const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
-  for (int i = 0; i < kPadding; ++i) {
-    __ int3();
-  }
-#endif
-}
-
-
-void PropLoadInstr::Compile(MacroAssembler* masm) {
-  // The key should not be on the stack---if it is a compiler-generated
-  // temporary it is in the accumulator.
-  ASSERT(!key()->is_on_stack());
-
-  Comment cmnt(masm, "[ Load from Property");
-  // If the key is known at compile-time we may be able to use a load IC.
-  bool is_keyed_load = true;
-  if (key()->is_constant()) {
-    // Still use the keyed load IC if the key can be parsed as an integer so
-    // we will get into the case that handles [] on string objects.
-    Handle<Object> key_val = Constant::cast(key())->handle();
-    uint32_t ignored;
-    if (key_val->IsSymbol() &&
-        !String::cast(*key_val)->AsArrayIndex(&ignored)) {
-      is_keyed_load = false;
-    }
-  }
-
-  if (!object()->is_on_stack()) object()->Push(masm);
-  // A test rax instruction after the call indicates to the IC code that it
-  // was inlined.  Ensure there is not one after the call below.
-  if (is_keyed_load) {
-    key()->Push(masm);
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    __ pop(rbx);  // Discard key.
-  } else {
-    key()->Get(masm, rcx);
-    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-  }
-  __ pop(rbx);  // Discard receiver.
-  location()->Set(masm, rax);
-}
-
-
-void BinaryOpInstr::Compile(MacroAssembler* masm) {
-  // The right-hand value should not be on the stack---if it is a
-  // compiler-generated temporary it is in the accumulator.
-  ASSERT(!right()->is_on_stack());
-
-  Comment cmnt(masm, "[ BinaryOpInstr");
-  // We can overwrite one of the operands if it is a temporary.
-  OverwriteMode mode = NO_OVERWRITE;
-  if (left()->is_temporary()) {
-    mode = OVERWRITE_LEFT;
-  } else if (right()->is_temporary()) {
-    mode = OVERWRITE_RIGHT;
-  }
-
-  // Push both operands and call the specialized stub.
-  if (!left()->is_on_stack()) left()->Push(masm);
-  right()->Push(masm);
-  GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
-  __ CallStub(&stub);
-  location()->Set(masm, rax);
-}
-
-
-void ReturnInstr::Compile(MacroAssembler* masm) {
-  // The location should be 'Effect'.  As a side effect, move the value to
-  // the accumulator.
-  Comment cmnt(masm, "[ ReturnInstr");
-  value()->Get(masm, rax);
-}
-
-
-void Constant::Get(MacroAssembler* masm, Register reg) {
-  __ Move(reg, handle_);
-}
-
-
-void Constant::Push(MacroAssembler* masm) {
-  __ Push(handle_);
-}
-
-
-static Operand ToOperand(SlotLocation* loc) {
-  switch (loc->type()) {
-    case Slot::PARAMETER: {
-      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
-      return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
-    }
-    case Slot::LOCAL: {
-      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
-      return Operand(rbp, kOffset - loc->index() * kPointerSize);
-    }
-    default:
-      UNREACHABLE();
-      return Operand(rax, 0);
-  }
-}
-
-
-void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ Move(ToOperand(loc), handle_);
-}
-
-
-void SlotLocation::Get(MacroAssembler* masm, Register reg) {
-  __ movq(reg, ToOperand(this));
-}
-
-
-void SlotLocation::Set(MacroAssembler* masm, Register reg) {
-  __ movq(ToOperand(this), reg);
-}
-
-
-void SlotLocation::Push(MacroAssembler* masm) {
-  __ push(ToOperand(this));
-}
-
-
-void SlotLocation::Move(MacroAssembler* masm, Value* value) {
-  // We dispatch to the value because in some cases (temp or constant) we
-  // can use special instruction sequences.
-  value->MoveToSlot(masm, this);
-}
-
-
-void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  __ movq(kScratchRegister, ToOperand(this));
-  __ movq(ToOperand(loc), kScratchRegister);
-}
-
-
-void TempLocation::Get(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(rax)) __ movq(reg, rax);
-      break;
-    case STACK:
-      __ pop(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Set(MacroAssembler* masm, Register reg) {
-  switch (where_) {
-    case ACCUMULATOR:
-      if (!reg.is(rax)) __ movq(rax, reg);
-      break;
-    case STACK:
-      __ push(reg);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Push(MacroAssembler* masm) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ push(rax);
-      break;
-    case STACK:
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::Move(MacroAssembler* masm, Value* value) {
-  switch (where_) {
-    case ACCUMULATOR:
-      value->Get(masm, rax);
-      break;
-    case STACK:
-      value->Push(masm);
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
-  switch (where_) {
-    case ACCUMULATOR:
-      __ movq(ToOperand(loc), rax);
-      break;
-    case STACK:
-      __ pop(ToOperand(loc));
-      break;
-    case NOT_ALLOCATED:
-      UNREACHABLE();
-  }
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index e4dbd62..8e6dbef 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -913,7 +913,6 @@
 
 void CodeGenerator::VisitDeclaration(Declaration* node) {
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -2592,7 +2591,6 @@
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatementPosition(node);
 
   { Reference target(this, node->target());
     if (target.is_illegal()) {
@@ -2674,8 +2672,6 @@
 
 void CodeGenerator::VisitThrow(Throw* node) {
   Comment cmnt(masm_, "[ Throw");
-  CodeForStatementPosition(node);
-
   Load(node->exception());
   Result result = frame_->CallRuntime(Runtime::kThrow, 1);
   frame_->Push(&result);
@@ -2694,8 +2690,6 @@
 
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatementPosition(node);
-
   // Check if the function is a variable or a property.
   Expression* function = node->expression();
   Variable* var = function->AsVariableProxy()->AsVariable();
@@ -2710,7 +2704,64 @@
   // is resolved in cache misses (this also holds for megamorphic calls).
   // ------------------------------------------------------------------------
 
-  if (var != NULL && !var->is_this() && var->is_global()) {
+  if (var != NULL && var->is_possibly_eval()) {
+    // ----------------------------------
+    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
+    // ----------------------------------
+
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+
+    // Prepare the stack for the call to the resolved function.
+    Load(function);
+
+    // Allocate a frame slot for the receiver.
+    frame_->Push(Factory::undefined_value());
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      Load(args->at(i));
+    }
+
+    // Prepare the stack for the call to ResolvePossiblyDirectEval.
+    frame_->PushElementAt(arg_count + 1);
+    if (arg_count > 0) {
+      frame_->PushElementAt(arg_count);
+    } else {
+      frame_->Push(Factory::undefined_value());
+    }
+
+    // Resolve the call.
+    Result result =
+        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+    // Touch up the stack with the right values for the function and the
+    // receiver.  Use a scratch register to avoid destroying the result.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ movq(scratch.reg(),
+            FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
+    frame_->SetElementAt(arg_count + 1, &scratch);
+
+    // We can reuse the result register now.
+    frame_->Spill(result.reg());
+    __ movq(result.reg(),
+            FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
+    frame_->SetElementAt(arg_count, &result);
+
+    // Call the function.
+    CodeForSourcePosition(node->position());
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub call_function(arg_count, in_loop);
+    result = frame_->CallStub(&call_function, arg_count + 1);
+
+    // Restore the context and overwrite the function on the stack with
+    // the result.
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &result);
+
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is global
     // ----------------------------------
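
Editor's note: the eval path above builds a specific frame before resolving the call. A sketch of that layout, reconstructed from the pushes (not code from the tree):

    // Virtual frame just before the %ResolvePossiblyDirectEval call, with
    // arg_count == n:
    //   function                               <- the value loaded for 'eval'
    //   receiver                               <- undefined placeholder
    //   arg 1 ... arg n
    //   copy of function                       <- runtime argument 1
    //   copy of arg 1, or undefined if n == 0  <- runtime argument 2
    // The runtime call returns a FixedArray: element 0 overwrites the
    // function slot, element 1 overwrites the receiver slot, and only then
    // is the function actually called.
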
@@ -2737,6 +2788,7 @@
     frame_->RestoreContextRegister();
     // Replace the function on the stack with the result.
     frame_->SetElementAt(0, &result);
+
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
     // ----------------------------------
@@ -2763,6 +2815,7 @@
 
     // Call the function.
     CallWithArguments(args, node->position());
+
   } else if (property != NULL) {
     // Check if the key is a literal string.
     Literal* literal = property->key()->AsLiteral();
@@ -2828,6 +2881,7 @@
       // Call the function.
       CallWithArguments(args, node->position());
     }
+
   } else {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
@@ -2845,70 +2899,8 @@
 }
 
 
-void CodeGenerator::VisitCallEval(CallEval* node) {
-  Comment cmnt(masm_, "[ CallEval");
-
-  // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
-  // the function we need to call and the receiver of the call.
-  // Then we call the resolved function using the given arguments.
-
-  ZoneList<Expression*>* args = node->arguments();
-  Expression* function = node->expression();
-
-  CodeForStatementPosition(node);
-
-  // Prepare the stack for the call to the resolved function.
-  Load(function);
-
-  // Allocate a frame slot for the receiver.
-  frame_->Push(Factory::undefined_value());
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Prepare the stack for the call to ResolvePossiblyDirectEval.
-  frame_->PushElementAt(arg_count + 1);
-  if (arg_count > 0) {
-    frame_->PushElementAt(arg_count);
-  } else {
-    frame_->Push(Factory::undefined_value());
-  }
-
-  // Resolve the call.
-  Result result =
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
-  // Touch up the stack with the right values for the function and the
-  // receiver.  Use a scratch register to avoid destroying the result.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  __ movq(scratch.reg(),
-          FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
-  frame_->SetElementAt(arg_count + 1, &scratch);
-
-  // We can reuse the result register now.
-  frame_->Spill(result.reg());
-  __ movq(result.reg(),
-          FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
-  frame_->SetElementAt(arg_count, &result);
-
-  // Call the function.
-  CodeForSourcePosition(node->position());
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop);
-  result = frame_->CallStub(&call_function, arg_count + 1);
-
-  // Restore the context and overwrite the function on the stack with
-  // the result.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &result);
-}
-
-
 void CodeGenerator::VisitCallNew(CallNew* node) {
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -7204,12 +7196,12 @@
                                              Register scratch,
                                              Register result) {
   // Allocate heap number in new space.
-  __ AllocateObjectInNewSpace(HeapNumber::kSize,
-                              result,
-                              scratch,
-                              no_reg,
-                              need_gc,
-                              TAG_OBJECT);
+  __ AllocateInNewSpace(HeapNumber::kSize,
+                        result,
+                        scratch,
+                        no_reg,
+                        need_gc,
+                        TAG_OBJECT);
 
   // Set the map and tag the result.
   __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 2ae8145..87db3a9 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -553,7 +553,7 @@
   // information.
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(AstNode* node);
+  void CodeForStatementPosition(Statement* node);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index b2f52b2..8209091 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -257,7 +257,7 @@
   //  -- rsp[8] : name
   //  -- rsp[16] : receiver
   // -----------------------------------
-  Label slow, fast, check_string, index_int, index_string;
+  Label slow, check_string, index_int, index_string, check_pixel_array;
 
   // Load name and receiver.
   __ movq(rax, Operand(rsp, kPointerSize));
@@ -287,11 +287,36 @@
   __ bind(&index_int);
   __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
-  __ j(not_equal, &slow);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_pixel_array);
   // Check that the key (index) is within bounds.
   __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(below, &fast);  // Unsigned comparison rejects negative indices.
+  __ j(above_equal, &slow);  // Unsigned comparison rejects negative indices.
+  // Fast case: Do the load.
+  __ movq(rax, Operand(rcx, rax, times_pointer_size,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
+
+  // Check whether the elements array is a pixel array.
+  // rax: untagged index
+  // rcx: elements array
+  __ bind(&check_pixel_array);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kPixelArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+  __ movb(rax, Operand(rcx, rax, times_1, 0));
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
+
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -332,16 +357,6 @@
   __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
   __ shrl(rax, Immediate(String::kLongLengthShift));
   __ jmp(&index_int);
-  // Fast case: Do the load.
-  __ bind(&fast);
-  __ movq(rax, Operand(rcx, rax, times_pointer_size,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
-  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ j(equal, &slow);
-  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
-  __ ret(0);
 }
 
 
@@ -402,7 +417,7 @@
   //  -- rsp[8] : key
   //  -- rsp[16] : receiver
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, fast, array, extra, check_pixel_array;
 
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
@@ -435,8 +450,9 @@
   // rbx: index (as a smi), zero-extended.
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
-  __ j(not_equal, &slow);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
   __ SmiToInteger32(rdx, rbx);
   __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
@@ -445,7 +461,6 @@
   // rbx: index (as a smi)
   __ j(below, &fast);
 
-
   // Slow case: Push extra copies of the arguments (3).
   __ bind(&slow);
   __ pop(rcx);
@@ -456,6 +471,37 @@
   // Do tail-call to runtime routine.
   __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
 
+  // Check whether the elements array is a pixel array.
+  // rax: value
+  // rcx: elements array
+  // rbx: index (as a smi), zero-extended.
+  __ bind(&check_pixel_array);
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kPixelArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  // Check that the value is a smi. If a conversion is needed, call into the
+  // runtime to convert and clamp.
+  __ JumpIfNotSmi(rax, &slow);
+  __ SmiToInteger32(rbx, rbx);
+  __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ movq(rdx, rax);  // Save the value.
+  __ SmiToInteger32(rax, rax);
+  {  // Clamp the value to [0..255].
+    Label done, is_negative;
+    __ testl(rax, Immediate(0xFFFFFF00));
+    __ j(zero, &done);
+    __ j(negative, &is_negative);
+    __ movl(rax, Immediate(255));
+    __ jmp(&done);
+    __ bind(&is_negative);
+    __ xorl(rax, rax);  // Clear rax.
+    __ bind(&done);
+  }
+  __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+  __ movb(Operand(rcx, rbx, times_1, 0), rax);
+  __ movq(rax, rdx);  // Return the original value.
+  __ ret(0);
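
Editor's note: the clamp block above corresponds to this small C++ model (a sketch of the emitted test/branch sequence, not library code):

    // Clamp an int32 pixel value to [0..255]: in-range values pass through
    // (the testl path), negatives become 0, everything above 255 becomes 255.
    int ClampToByte(int value) {
      if ((value & 0xFFFFFF00) == 0) return value;
      return value < 0 ? 0 : 255;
    }
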
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
@@ -476,7 +522,6 @@
   __ SmiSubConstant(rbx, rbx, 1, NULL);
   __ jmp(&fast);
 
-
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode; if it is the
   // length is always a smi.
@@ -493,7 +538,6 @@
   __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
   __ j(above_equal, &extra);
 
-
   // Fast case: Do the store.
   __ bind(&fast);
   // rax: value
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 637428d..2689e38 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -63,6 +63,13 @@
 }
 
 
+void MacroAssembler::CompareRoot(Operand with,
+                                 Heap::RootListIndex index) {
+  LoadRoot(kScratchRegister, index);
+  cmpq(with, kScratchRegister);
+}
+
+
 static void RecordWriteHelper(MacroAssembler* masm,
                               Register object,
                               Register addr,
@@ -332,17 +339,16 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   movq(rax, Immediate(num_arguments));
-  JumpToBuiltin(ext, result_size);
+  JumpToRuntime(ext, result_size);
 }
 
 
-void MacroAssembler::JumpToBuiltin(const ExternalReference& ext,
+void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
                                    int result_size) {
   // Set the entry point and jump to the C entry runtime stub.
   movq(rbx, ext);
   CEntryStub ces(result_size);
-  movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
-  jmp(kScratchRegister);
+  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
@@ -357,7 +363,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
     unresolved_.Add(entry);
@@ -519,6 +524,18 @@
 }
 
 
+void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
+                                                    int constant,
+                                                    Label* on_greater_equals) {
+  if (Smi::IsValid(constant)) {
+    Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
+    j(are_greater_equal, on_greater_equals);
+  } else if (constant < Smi::kMinValue) {
+    // Every smi is greater than a constant below the smi range.
+    jmp(on_greater_equals);
+  }
+  // No smi can be greater than or equal to a constant above Smi::kMaxValue,
+  // so nothing is emitted and execution falls through.
+}
+
+
 void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
   Condition is_valid = CheckInteger32ValidSmiValue(src);
   j(ReverseCondition(is_valid), on_invalid);
@@ -602,6 +619,22 @@
 }
 
 
+Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
+                                                        int constant) {
+  if (constant == 0) {
+    testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
+    return positive;
+  }
+  if (Smi::IsValid(constant)) {
+    cmpl(src, Immediate(Smi::FromInt(constant)));
+    return greater_equal;
+  }
+  // Constants outside the valid smi range must be handled by the caller.
+  UNREACHABLE();
+  return no_condition;
+}
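
Editor's note: this comparison is sound because smi tagging is order-preserving. With kSmiTag == 0 and a one-bit tag, a smi is just its value shifted left by one, so comparing tagged words compares the untagged values. A minimal model under those assumptions:

    #include <cstdint>

    // Tagging shifts left by one (kSmiTag == 0, kSmiTagSize == 1); a left
    // shift preserves signed order, so the tagged cmpl answers the untagged
    // question directly.
    inline int32_t TagSmi(int32_t value) { return value << 1; }
    inline bool SmiGreaterEquals(int32_t tagged_smi, int constant) {
      return tagged_smi >= TagSmi(constant);  // greater_equal after cmpl
    }
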
+
+
 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
   // A 32-bit integer value can be converted to a smi if it is in the
   // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
@@ -1235,17 +1268,8 @@
 
 
 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
-  Label target;
-  bind(&target);
-#endif
-  jmp(kScratchRegister);
-#ifdef DEBUG
-  ASSERT_EQ(kCallTargetAddressOffset,
-            SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+  // TODO(X64): Inline this
+  jmp(code_object, rmode);
 }
 
 
@@ -1264,17 +1288,7 @@
 void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   WriteRecordedPositions();
-  movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
-  // Patch target is kPointer size bytes *before* target label.
-  Label target;
-  bind(&target);
-#endif
-  call(kScratchRegister);
-#ifdef DEBUG
-  ASSERT_EQ(kCallTargetAddressOffset,
-            SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+  call(code_object, rmode);
 }
 
 
@@ -1541,7 +1555,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry =
         { pc_offset() - kCallTargetAddressOffset, flags, name };
@@ -2024,12 +2037,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int object_size,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
@@ -2053,14 +2066,14 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(int header_size,
-                                              ScaleFactor element_size,
-                                              Register element_count,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(int header_size,
+                                        ScaleFactor element_size,
+                                        Register element_count,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
@@ -2084,12 +2097,12 @@
 }
 
 
-void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
-                                              Register result,
-                                              Register result_end,
-                                              Register scratch,
-                                              Label* gc_required,
-                                              AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+                                        Register result,
+                                        Register result_end,
+                                        Register scratch,
+                                        Label* gc_required,
+                                        AllocationFlags flags) {
   // Load address of new object into result.
   LoadAllocationTopHelper(result, result_end, scratch, flags);
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index de2070a..adc136a 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -56,6 +56,7 @@
 
   void LoadRoot(Register destination, Heap::RootListIndex index);
   void CompareRoot(Register with, Heap::RootListIndex index);
+  void CompareRoot(Operand with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
 
   // ---------------------------------------------------------------------------
@@ -193,6 +194,9 @@
   // Check whether a tagged smi is equal to a constant.
   Condition CheckSmiEqualsConstant(Register src, int constant);
 
+  // Check whether a tagged smi is greater than or equal to a constant.
+  Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
+
   // Checks whether a 32-bit integer value is valid for conversion
   // to a smi.
   Condition CheckInteger32ValidSmiValue(Register src);
@@ -216,6 +220,12 @@
   // to the constant.
   void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
 
+  // Jump to label if the value is a tagged smi with value greater than or equal
+  // to the constant.
+  void JumpIfSmiGreaterEqualsConstant(Register src,
+                                      int constant,
+                                      Label* on_greater_equals);
+
   // Jump if either or both register are not smi values.
   void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
 
@@ -449,30 +459,30 @@
   // and result_end have not yet been tagged as heap objects. If
   // result_contains_top_on_entry is true the content of result is known to be
   // the allocation top on entry (could be result_end from a previous call to
-  // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
   // should be no_reg as it is never used.
-  void AllocateObjectInNewSpace(int object_size,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
-  void AllocateObjectInNewSpace(int header_size,
-                                ScaleFactor element_size,
-                                Register element_count,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(int header_size,
+                          ScaleFactor element_size,
+                          Register element_count,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
-  void AllocateObjectInNewSpace(Register object_size,
-                                Register result,
-                                Register result_end,
-                                Register scratch,
-                                Label* gc_required,
-                                AllocationFlags flags);
+  void AllocateInNewSpace(Register object_size,
+                          Register result,
+                          Register result_end,
+                          Register scratch,
+                          Label* gc_required,
+                          AllocationFlags flags);
 
   // Undo allocation in new space. The object passed and objects allocated after
   // it will no longer be allocated. Make sure that no pointers are left to the
@@ -527,14 +537,14 @@
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToBuiltin, but also takes care of passing the number
+  // Like JumpToRuntime, but also takes care of passing the number
   // of arguments.
   void TailCallRuntime(const ExternalReference& ext,
                        int num_arguments,
                        int result_size);
 
-  // Jump to the builtin routine.
-  void JumpToBuiltin(const ExternalReference& ext, int result_size);
+  // Jump to a runtime routine.
+  void JumpToRuntime(const ExternalReference& ext, int result_size);
 
 
   // ---------------------------------------------------------------------------
@@ -594,8 +604,16 @@
                       Label* done,
                       InvokeFlag flag);
 
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
+  // Prepares for a call or jump to a builtin by doing two things:
+  // 1. Emits code that fetches the builtin's function object from the context
+  //    at runtime, and puts it in the register rdi.
+  // 2. Fetches the builtin's code object, and returns it in a handle, at
+  //    compile time, so that later code can emit instructions to call or jump
+  //    to the builtin directly.  If the code object has not yet been created,
+  //    it returns the builtin code object for IllegalFunction, and sets the
+  //    output parameter "resolved" to false.  Code that uses the return value
+  //    should then add the address and the builtin name to the list of fixups
+  //    called unresolved_, which is fixed up by the bootstrapper.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
   // Activation support.
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 184c166..998c909 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -28,6 +28,7 @@
 #ifndef V8_X64_SIMULATOR_X64_H_
 #define V8_X64_SIMULATOR_X64_H_
 
+#include "allocation.h"
 
 // Since there is no simulator for the x64 architecture, the only thing we can
 // do is to call the entry directly.
@@ -35,15 +36,15 @@
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   entry(p0, p1, p2, p3, p4);
 
-// Calculated the stack limit beyond which we will throw stack overflow errors.
-// This macro must be called from a C++ method. It relies on being able to take
-// the address of "this" to get a value on the current execution stack and then
-// calculates the stack limit based on that value.
-// NOTE: The check for overflow is not safe as there is no guarantee that the
-// running thread has its stack in all memory up to address 0x00000000.
-#define GENERATED_CODE_STACK_LIMIT(limit) \
-  (reinterpret_cast<uintptr_t>(this) >= limit ? \
-      reinterpret_cast<uintptr_t>(this) - limit : 0)
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on x64 uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return c_limit;
+  }
+};
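
Editor's note: a hypothetical caller computing the stack guard limit would pass the C limit straight through (the variable names here are illustrative):

    // On x64 generated code runs on the C stack, so the JS stack limit is
    // simply the C stack limit.
    uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(c_limit);
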
 
 // Call the generated regexp code directly. The entry function pointer should
 // expect seven int/pointer sized arguments and return an int.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 741d4c3..0994230 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1751,6 +1751,7 @@
   // Load the initial map and verify that it is in fact a map.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi.
+  ASSERT(kSmiTag == 0);
   __ JumpIfSmi(rbx, &generic_stub_call);
   __ CmpObjectType(rbx, MAP_TYPE, rcx);
   __ j(not_equal, &generic_stub_call);
@@ -1768,12 +1769,12 @@
   // rbx: initial map
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ shl(rcx, Immediate(kPointerSizeLog2));
-  __ AllocateObjectInNewSpace(rcx,
-                              rdx,
-                              rcx,
-                              no_reg,
-                              &generic_stub_call,
-                              NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(rcx,
+                        rdx,
+                        rcx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // rbx: initial map