Version 1.2.5.

Fixed bug in initial boundary check for Boyer-Moore text search (issue 349).

Fixed compilation issues with MinGW and gcc 4.3+ and added support for armv7 and cortex-a8 architectures.  Patches by Lei Zhang and Craig Schlenter.

Added a script cache to the debugger.

Optimized compilation performance by improving internal data structures and avoiding expensive property load optimizations for code that's infrequently executed.

Exposed the calling JavaScript context through the static API function Context::GetCalling().


git-svn-id: http://v8.googlecode.com/svn/trunk@2050 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index 4cd93be..ee73ac0 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -34,7 +34,8 @@
 #include "top.h"
 #include "zone-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 template <class C>
diff --git a/src/accessors.h b/src/accessors.h
index 1dd8fdd..9e877d1 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -28,7 +28,8 @@
 #ifndef V8_ACCESSORS_H_
 #define V8_ACCESSORS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
diff --git a/src/allocation.cc b/src/allocation.cc
index 3d26123..41724b6 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -29,7 +29,8 @@
 
 #include "v8.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 void* Malloced::New(size_t size) {
diff --git a/src/allocation.h b/src/allocation.h
index a690f08..586c4fd 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -28,7 +28,8 @@
 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // A class that controls whether allocation is allowed.  This is for
diff --git a/src/api.cc b/src/api.cc
index c16920b..48a9d1a 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -99,7 +99,6 @@
 // --- E x c e p t i o n   B e h a v i o r ---
 
 
-static bool has_shut_down = false;
 static FatalErrorCallback exception_behavior = NULL;
 
 
@@ -123,7 +122,7 @@
 // When V8 cannot allocated memory FatalProcessOutOfMemory is called.
 // The default fatal error handler is called and execution is stopped.
 void i::V8::FatalProcessOutOfMemory(const char* location) {
-  has_shut_down = true;
+  i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   {
     LEAVE_V8;
@@ -142,13 +141,13 @@
 bool Utils::ReportApiFailure(const char* location, const char* message) {
   FatalErrorCallback callback = GetFatalErrorHandler();
   callback(location, message);
-  has_shut_down = true;
+  i::V8::SetFatalError();
   return false;
 }
 
 
 bool V8::IsDead() {
-  return has_shut_down;
+  return i::V8::IsDead();
 }
 
 
@@ -186,7 +185,8 @@
  * yet been done.
  */
 static inline bool IsDeadCheck(const char* location) {
-  return has_shut_down ? ReportV8Dead(location) : false;
+  return !i::V8::IsRunning()
+      && i::V8::IsDead() ? ReportV8Dead(location) : false;
 }
 
 
@@ -205,9 +205,14 @@
 static i::StringInputBuffer write_input_buffer;
 
 
-static void EnsureInitialized(const char* location) {
-  if (IsDeadCheck(location)) return;
-  ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
+static inline bool EnsureInitialized(const char* location) {
+  if (i::V8::IsRunning()) {
+    return true;
+  }
+  if (IsDeadCheck(location)) {
+    return false;
+  }
+  return ApiCheck(v8::V8::Initialize(), location, "Error initializing V8");
 }
 
 
@@ -225,29 +230,25 @@
 
 
 v8::Handle<v8::Primitive> ImplementationUtilities::Undefined() {
-  if (IsDeadCheck("v8::Undefined()")) return v8::Handle<v8::Primitive>();
-  EnsureInitialized("v8::Undefined()");
+  if (!EnsureInitialized("v8::Undefined()")) return v8::Handle<v8::Primitive>();
   return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::undefined_value()));
 }
 
 
 v8::Handle<v8::Primitive> ImplementationUtilities::Null() {
-  if (IsDeadCheck("v8::Null()")) return v8::Handle<v8::Primitive>();
-  EnsureInitialized("v8::Null()");
+  if (!EnsureInitialized("v8::Null()")) return v8::Handle<v8::Primitive>();
   return v8::Handle<Primitive>(ToApi<Primitive>(i::Factory::null_value()));
 }
 
 
 v8::Handle<v8::Boolean> ImplementationUtilities::True() {
-  if (IsDeadCheck("v8::True()")) return v8::Handle<v8::Boolean>();
-  EnsureInitialized("v8::True()");
+  if (!EnsureInitialized("v8::True()")) return v8::Handle<v8::Boolean>();
   return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::true_value()));
 }
 
 
 v8::Handle<v8::Boolean> ImplementationUtilities::False() {
-  if (IsDeadCheck("v8::False()")) return v8::Handle<v8::Boolean>();
-  EnsureInitialized("v8::False()");
+  if (!EnsureInitialized("v8::False()")) return v8::Handle<v8::Boolean>();
   return v8::Handle<v8::Boolean>(ToApi<Boolean>(i::Factory::false_value()));
 }
 
@@ -373,21 +374,21 @@
 
 bool V8::IsGlobalNearDeath(void** obj) {
   LOG_API("IsGlobalNearDeath");
-  if (has_shut_down) return false;
+  if (!i::V8::IsRunning()) return false;
   return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
 }
 
 
 bool V8::IsGlobalWeak(void** obj) {
   LOG_API("IsGlobalWeak");
-  if (has_shut_down) return false;
+  if (!i::V8::IsRunning()) return false;
   return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
 }
 
 
 void V8::DisposeGlobal(void** obj) {
   LOG_API("DisposeGlobal");
-  if (has_shut_down) return;
+  if (!i::V8::IsRunning()) return;
   i::Object** ptr = reinterpret_cast<i::Object**>(obj);
   if ((*ptr)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
   i::GlobalHandles::Destroy(ptr);
@@ -431,7 +432,7 @@
 
 
 void Context::Exit() {
-  if (has_shut_down) return;
+  if (!i::V8::IsRunning()) return;
   if (!ApiCheck(thread_local.LeaveLastContext(),
                 "v8::Context::Exit()",
                 "Cannot exit non-entered context")) {
@@ -2450,7 +2451,7 @@
 // --- E n v i r o n m e n t ---
 
 bool v8::V8::Initialize() {
-  if (i::V8::HasBeenSetup()) return true;
+  if (i::V8::IsRunning()) return true;
   ENTER_V8;
   HandleScope scope;
   if (i::Snapshot::Initialize()) {
@@ -3123,12 +3124,23 @@
 #endif
 }
 
+
 void V8::ResumeProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   i::Logger::ResumeProfiler();
 #endif
 }
 
+
+bool V8::IsProfilerPaused() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  return i::Logger::IsProfilerPaused();
+#else
+  return true;
+#endif
+}
+
+
 int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
@@ -3320,7 +3332,7 @@
 
 
 void Debug::DebugBreak() {
-  if (!i::V8::HasBeenSetup()) return;
+  if (!i::V8::IsRunning()) return;
   i::StackGuard::DebugBreak();
 }
 
@@ -3362,7 +3374,7 @@
 
 void Debug::SendCommand(const uint16_t* command, int length,
                         ClientData* client_data) {
-  if (!i::V8::HasBeenSetup()) return;
+  if (!i::V8::IsRunning()) return;
   i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
                               client_data);
 }
@@ -3378,7 +3390,7 @@
 
 Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
                           v8::Handle<v8::Value> data) {
-  if (!i::V8::HasBeenSetup()) return Handle<Value>();
+  if (!i::V8::IsRunning()) return Handle<Value>();
   ON_BAILOUT("v8::Debug::Call()", return Handle<Value>());
   ENTER_V8;
   i::Handle<i::Object> result;
diff --git a/src/arguments.h b/src/arguments.h
index 2ec68ed..80f9006 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -28,7 +28,8 @@
 #ifndef V8_ARGUMENTS_H_
 #define V8_ARGUMENTS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Arguments provides access to runtime call parameters.
 //
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index fe64761..824a5fd 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -41,7 +41,8 @@
 #include "cpu.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Condition NegateCondition(Condition cc) {
   ASSERT(cc != al);
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 4b06487..6ec8f46 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -39,7 +39,8 @@
 #include "arm/assembler-arm-inl.h"
 #include "serialize.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -----------------------------------------------------------------------------
 // Implementation of Register and CRegister
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d4c7979..5dc15a6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -42,7 +42,8 @@
 
 #include "assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // CPU Registers.
 //
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index a65bc35..588798b 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -31,7 +31,8 @@
 #include "debug.h"
 #include "runtime.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 #define __ ACCESS_MASM(masm)
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 3c6578d..c7e32c3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -36,7 +36,8 @@
 #include "scopes.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define __ ACCESS_MASM(masm_)
 
@@ -108,7 +109,7 @@
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
   ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame(this);
+  frame_ = new VirtualFrame();
   cc_reg_ = al;
   set_in_spilled_code(false);
   {
@@ -133,13 +134,13 @@
 #endif
 
     // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots(scope_->num_stack_slots());
+    frame_->AllocateStackSlots();
     // Initialize the function return target after the locals are set
     // up, because it needs the expected frame height from the frame.
-    function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
     function_return_is_shadowed_ = false;
 
-    VirtualFrame::SpilledScope spilled_scope(this);
+    VirtualFrame::SpilledScope spilled_scope;
     if (scope_->num_heap_slots() > 0) {
       // Allocate local context.
       // Get outer context and create a new context based on it.
@@ -148,7 +149,7 @@
       frame_->CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result
 
 #ifdef DEBUG
-      JumpTarget verified_true(this);
+      JumpTarget verified_true;
       __ cmp(r0, Operand(cp));
       verified_true.Branch(eq);
       __ stop("NewContext: r0 is expected to be the same as cp");
@@ -456,14 +457,14 @@
   int original_height = frame_->height();
 #endif
   ASSERT(!in_spilled_code());
-  JumpTarget true_target(this);
-  JumpTarget false_target(this);
+  JumpTarget true_target;
+  JumpTarget false_target;
   LoadCondition(x, typeof_state, &true_target, &false_target, false);
 
   if (has_cc()) {
     // Convert cc_reg_ into a boolean value.
-    JumpTarget loaded(this);
-    JumpTarget materialize_true(this);
+    JumpTarget loaded;
+    JumpTarget materialize_true;
     materialize_true.Branch(cc_reg_);
     __ mov(r0, Operand(Factory::false_value()));
     frame_->EmitPush(r0);
@@ -478,7 +479,7 @@
   if (true_target.is_linked() || false_target.is_linked()) {
     // We have at least one condition value that has been "translated"
     // into a branch, thus it needs to be loaded explicitly.
-    JumpTarget loaded(this);
+    JumpTarget loaded;
     if (frame_ != NULL) {
       loaded.Jump();  // Don't lose the current TOS.
     }
@@ -510,14 +511,14 @@
 
 
 void CodeGenerator::LoadGlobal() {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   __ ldr(r0, GlobalObject());
   frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ ldr(scratch,
          FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
@@ -529,7 +530,7 @@
 // that we have the INSIDE_TYPEOF typeof state. => Need to handle global
 // variables w/o reference errors elsewhere.
 void CodeGenerator::LoadTypeofExpression(Expression* x) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Variable* variable = x->AsVariableProxy()->AsVariable();
   if (variable != NULL && !variable->is_this() && variable->is_global()) {
     // NOTE: This is somewhat nasty. We force the compiler to load
@@ -559,7 +560,7 @@
 
 
 void CodeGenerator::LoadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ LoadReference");
   Expression* e = ref->expression();
   Property* property = e->AsProperty();
@@ -602,7 +603,7 @@
 
 
 void CodeGenerator::UnloadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
   int size = ref->size();
@@ -619,7 +620,7 @@
 // may jump to 'false_target' in case the register converts to 'false'.
 void CodeGenerator::ToBoolean(JumpTarget* true_target,
                               JumpTarget* false_target) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // Note: The generated code snippet does not change stack variables.
   //       Only the condition code should be set.
   frame_->EmitPop(r0);
@@ -701,7 +702,7 @@
 
 void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                            OverwriteMode overwrite_mode) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // sp[0] : y
   // sp[1] : x
   // result : r0
@@ -781,7 +782,7 @@
 
 void DeferredInlineSmiOperation::Generate() {
   enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope(generator());
+  VirtualFrame::SpilledScope spilled_scope;
 
   switch (op_) {
     case Token::ADD: {
@@ -853,7 +854,7 @@
                                  Handle<Object> value,
                                  bool reversed,
                                  OverwriteMode mode) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a literal smi. With this optimization, the
@@ -865,7 +866,7 @@
 
   int int_value = Smi::cast(*value)->value();
 
-  JumpTarget exit(this);
+  JumpTarget exit;
   frame_->EmitPop(r0);
 
   switch (op) {
@@ -987,7 +988,7 @@
 
 
 void CodeGenerator::Comparison(Condition cc, bool strict) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // sp[0] : y
   // sp[1] : x
   // result : cc register
@@ -995,8 +996,8 @@
   // Strict only makes sense for equality comparisons.
   ASSERT(!strict || cc == eq);
 
-  JumpTarget exit(this);
-  JumpTarget smi(this);
+  JumpTarget exit;
+  JumpTarget smi;
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   if (cc == gt || cc == le) {
     cc = ReverseCondition(cc);
@@ -1057,12 +1058,14 @@
 
 class CallFunctionStub: public CodeStub {
  public:
-  explicit CallFunctionStub(int argc) : argc_(argc) {}
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) {}
 
   void Generate(MacroAssembler* masm);
 
  private:
   int argc_;
+  InLoopFlag in_loop_;
 
 #if defined(DEBUG)
   void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
@@ -1070,13 +1073,14 @@
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
 };
 
 
 // Call the function on the stack with the given arguments.
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                          int position) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // Push the arguments ("left-to-right") on the stack.
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
@@ -1087,7 +1091,8 @@
   CodeForSourcePosition(position);
 
   // Use the shared code stub to call the function.
-  CallFunctionStub call_function(arg_count);
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
   frame_->CallStub(&call_function, arg_count + 1);
 
   // Restore context and pop function from the stack.
@@ -1097,7 +1102,7 @@
 
 
 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(has_cc());
   Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
   target->Branch(cc);
@@ -1106,7 +1111,7 @@
 
 
 void CodeGenerator::CheckStack() {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   if (FLAG_check_stack) {
     Comment cmnt(masm_, "[ check stack");
     StackCheckStub stub;
@@ -1141,7 +1146,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
     VisitAndSpill(statements->at(i));
   }
@@ -1153,10 +1158,10 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Block");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   VisitStatementsAndSpill(node->statements());
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
@@ -1167,7 +1172,7 @@
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   __ mov(r0, Operand(pairs));
   frame_->EmitPush(r0);
   frame_->EmitPush(cp);
@@ -1182,7 +1187,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Declaration");
   CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
@@ -1254,7 +1259,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ExpressionStatement");
   CodeForStatementPosition(node);
   Expression* expression = node->expression();
@@ -1269,7 +1274,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "// EmptyStatement");
   CodeForStatementPosition(node);
   // nothing to do
@@ -1281,7 +1286,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ IfStatement");
   // Generate different code depending on which parts of the if statement
   // are present or not.
@@ -1290,11 +1295,11 @@
 
   CodeForStatementPosition(node);
 
-  JumpTarget exit(this);
+  JumpTarget exit;
   if (has_then_stm && has_else_stm) {
     Comment cmnt(masm_, "[ IfThenElse");
-    JumpTarget then(this);
-    JumpTarget else_(this);
+    JumpTarget then;
+    JumpTarget else_;
     // if (cond)
     LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
                           &then, &else_, true);
@@ -1318,7 +1323,7 @@
   } else if (has_then_stm) {
     Comment cmnt(masm_, "[ IfThen");
     ASSERT(!has_else_stm);
-    JumpTarget then(this);
+    JumpTarget then;
     // if (cond)
     LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
                           &then, &exit, true);
@@ -1334,7 +1339,7 @@
   } else if (has_else_stm) {
     Comment cmnt(masm_, "[ IfElse");
     ASSERT(!has_then_stm);
-    JumpTarget else_(this);
+    JumpTarget else_;
     // if (!cond)
     LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
                           &exit, &else_, true);
@@ -1371,7 +1376,7 @@
 
 
 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ContinueStatement");
   CodeForStatementPosition(node);
   node->target()->continue_target()->Jump();
@@ -1379,7 +1384,7 @@
 
 
 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ BreakStatement");
   CodeForStatementPosition(node);
   node->target()->break_target()->Jump();
@@ -1387,7 +1392,7 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ReturnStatement");
 
   if (function_return_is_shadowed_) {
@@ -1414,7 +1419,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ WithEnterStatement");
   CodeForStatementPosition(node);
   LoadAndSpill(node->expression());
@@ -1424,7 +1429,7 @@
     frame_->CallRuntime(Runtime::kPushContext, 1);
   }
 #ifdef DEBUG
-  JumpTarget verified_true(this);
+  JumpTarget verified_true;
   __ cmp(r0, Operand(cp));
   verified_true.Branch(eq);
   __ stop("PushContext: r0 is expected to be the same as cp");
@@ -1440,7 +1445,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ WithExitStatement");
   CodeForStatementPosition(node);
   // Pop context.
@@ -1467,9 +1472,9 @@
     Label* default_label,
     Vector<Label*> case_targets,
     Vector<Label> case_labels) {
-  VirtualFrame::SpilledScope spilled_scope(this);
-  JumpTarget setup_default(this);
-  JumpTarget is_smi(this);
+  VirtualFrame::SpilledScope spilled_scope;
+  JumpTarget setup_default;
+  JumpTarget is_smi;
 
   // A non-null default label pointer indicates a default case among
   // the case labels.  Otherwise we use the break target as a
@@ -1536,10 +1541,10 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ SwitchStatement");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   LoadAndSpill(node->tag());
   if (TryGenerateFastCaseSwitchStatement(node)) {
@@ -1547,10 +1552,10 @@
     return;
   }
 
-  JumpTarget next_test(this);
-  JumpTarget fall_through(this);
-  JumpTarget default_entry(this);
-  JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
+  JumpTarget next_test;
+  JumpTarget fall_through;
+  JumpTarget default_entry;
+  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
   CaseClause* default_clause = NULL;
@@ -1630,10 +1635,10 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ LoopStatement");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
   // known result for the test expression, with no side effects.
@@ -1654,19 +1659,19 @@
 
   switch (node->type()) {
     case LoopStatement::DO_LOOP: {
-      JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
 
       // Label the top of the loop for the backward CFG edge.  If the test
       // is always true we can use the continue target, and if the test is
       // always false there is no need.
       if (info == ALWAYS_TRUE) {
-        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
         node->continue_target()->Bind();
       } else if (info == ALWAYS_FALSE) {
-        node->continue_target()->Initialize(this);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
       } else {
         ASSERT(info == DONT_KNOW);
-        node->continue_target()->Initialize(this);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
         body.Bind();
       }
 
@@ -1713,11 +1718,11 @@
 
       // Label the top of the loop with the continue target for the backward
       // CFG edge.
-      node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
       node->continue_target()->Bind();
 
       if (info == DONT_KNOW) {
-        JumpTarget body(this);
+        JumpTarget body;
         LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
                               &body, node->break_target(), true);
         if (has_valid_frame()) {
@@ -1743,7 +1748,7 @@
     }
 
     case LoopStatement::FOR_LOOP: {
-      JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
 
       if (node->init() != NULL) {
         VisitAndSpill(node->init());
@@ -1755,16 +1760,16 @@
       // If there is no update statement, label the top of the loop with the
       // continue target, otherwise with the loop target.
       if (node->next() == NULL) {
-        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
         node->continue_target()->Bind();
       } else {
-        node->continue_target()->Initialize(this);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
         loop.Bind();
       }
 
       // If the test is always true, there is no need to compile it.
       if (info == DONT_KNOW) {
-        JumpTarget body(this);
+        JumpTarget body;
         LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
                               &body, node->break_target(), true);
         if (has_valid_frame()) {
@@ -1820,16 +1825,16 @@
   int original_height = frame_->height();
 #endif
   ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ForInStatement");
   CodeForStatementPosition(node);
 
-  JumpTarget primitive(this);
-  JumpTarget jsobject(this);
-  JumpTarget fixed_array(this);
-  JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check(this);
-  JumpTarget exit(this);
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
 
   // Get the object to enumerate over (converted to JSObject).
   LoadAndSpill(node->enumerable());
@@ -1914,8 +1919,8 @@
   // sp[4] : enumerable
   // Grab the current frame's height for the break and continue
   // targets only after all the state is pushed on the frame.
-  node->break_target()->Initialize(this);
-  node->continue_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   __ ldr(r0, frame_->ElementAt(0));  // load the current count
   __ ldr(r1, frame_->ElementAt(1));  // load the length
@@ -2014,12 +2019,12 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ TryCatch");
   CodeForStatementPosition(node);
 
-  JumpTarget try_block(this);
-  JumpTarget exit(this);
+  JumpTarget try_block;
+  JumpTarget exit;
 
   try_block.Call();
   // --- Catch block ---
@@ -2141,7 +2146,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ TryFinally");
   CodeForStatementPosition(node);
 
@@ -2150,8 +2155,8 @@
   // break/continue from within the try block.
   enum { FALLING, THROWING, JUMPING };
 
-  JumpTarget try_block(this);
-  JumpTarget finally_block(this);
+  JumpTarget try_block;
+  JumpTarget finally_block;
 
   try_block.Call();
 
@@ -2296,7 +2301,7 @@
       JumpTarget* original = shadows[i]->other_target();
       __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
       if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
-        JumpTarget skip(this);
+        JumpTarget skip;
         skip.Branch(ne);
         frame_->PrepareForReturn();
         original->Jump();
@@ -2309,7 +2314,7 @@
 
   if (has_valid_frame()) {
     // Check if we need to rethrow the exception.
-    JumpTarget exit(this);
+    JumpTarget exit;
     __ cmp(r2, Operand(Smi::FromInt(THROWING)));
     exit.Branch(ne);
 
@@ -2328,7 +2333,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ DebuggerStatament");
   CodeForStatementPosition(node);
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2340,7 +2345,7 @@
 
 
 void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(boilerplate->IsBoilerplate());
 
   // Push the boilerplate on the stack.
@@ -2358,7 +2363,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
@@ -2378,7 +2383,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
   InstantiateBoilerplate(node->boilerplate());
   ASSERT(frame_->height() == original_height + 1);
@@ -2389,11 +2394,11 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then(this);
-  JumpTarget else_(this);
-  JumpTarget exit(this);
+  JumpTarget then;
+  JumpTarget else_;
+  JumpTarget exit;
   LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
                         &then, &else_, true);
   Branch(false, &else_);
@@ -2408,12 +2413,12 @@
 
 
 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
 
-    JumpTarget slow(this);
-    JumpTarget done(this);
+    JumpTarget slow;
+    JumpTarget done;
 
     // Generate fast-case code for variables that might be shadowed by
     // eval-introduced variables.  Eval is used a lot without
@@ -2561,7 +2566,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Slot");
   LoadFromSlot(node, typeof_state());
   ASSERT(frame_->height() == original_height + 1);
@@ -2572,7 +2577,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ VariableProxy");
 
   Variable* var = node->var();
@@ -2592,7 +2597,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Literal");
   __ mov(r0, Operand(node->handle()));
   frame_->EmitPush(r0);
@@ -2604,7 +2609,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ RexExp Literal");
 
   // Retrieve the literal array and check the allocated entry.
@@ -2620,7 +2625,7 @@
       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   __ ldr(r2, FieldMemOperand(r1, literal_offset));
 
-  JumpTarget done(this);
+  JumpTarget done;
   __ cmp(r2, Operand(Factory::undefined_value()));
   done.Branch(ne);
 
@@ -2664,7 +2669,7 @@
 void DeferredObjectLiteral::Generate() {
   // Argument is passed in r1.
   enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope(generator());
+  VirtualFrame::SpilledScope spilled_scope;
 
   // If the entry is undefined we call the runtime system to compute
   // the literal.
@@ -2690,7 +2695,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ObjectLiteral");
 
   DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
@@ -2793,7 +2798,7 @@
 void DeferredArrayLiteral::Generate() {
   // Argument is passed in r1.
   enter()->Bind();
-  VirtualFrame::SpilledScope spilled_scope(generator());
+  VirtualFrame::SpilledScope spilled_scope;
 
   // If the entry is undefined we call the runtime system to computed
   // the literal.
@@ -2819,7 +2824,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ArrayLiteral");
 
   DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
@@ -2893,7 +2898,7 @@
   int original_height = frame_->height();
 #endif
   ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
   Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -2910,7 +2915,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
   CodeForStatementPosition(node);
 
@@ -2978,7 +2983,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Throw");
 
   LoadAndSpill(node->exception());
@@ -2993,7 +2998,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Property");
 
   { Reference property(this, node);
@@ -3007,7 +3012,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Call");
 
   ZoneList<Expression*>* args = node->arguments();
@@ -3050,7 +3055,8 @@
     }
 
     // Setup the receiver register and call the IC initialization code.
-    Handle<Code> stub = ComputeCallInitialize(arg_count);
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
     CodeForSourcePosition(node->position());
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
                            arg_count + 1);
@@ -3101,7 +3107,8 @@
       }
 
       // Set the receiver register and call the IC initialization code.
-      Handle<Code> stub = ComputeCallInitialize(arg_count);
+      InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+      Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
       CodeForSourcePosition(node->position());
       frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
       __ ldr(cp, frame_->Context());
@@ -3156,7 +3163,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CallEval");
 
   // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
@@ -3199,7 +3206,8 @@
   // Call the function.
   CodeForSourcePosition(node->position());
 
-  CallFunctionStub call_function(arg_count);
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
   frame_->CallStub(&call_function, arg_count + 1);
 
   __ ldr(cp, frame_->Context());
@@ -3214,7 +3222,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CallNew");
   CodeForStatementPosition(node);
 
@@ -3264,9 +3272,9 @@
 
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 1);
-  JumpTarget leave(this);
+  JumpTarget leave;
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);  // r0 contains object.
   // if (object->IsSmi()) return the object.
@@ -3286,9 +3294,9 @@
 
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
-  JumpTarget leave(this);
+  JumpTarget leave;
   LoadAndSpill(args->at(0));  // Load the object.
   LoadAndSpill(args->at(1));  // Load the value.
   frame_->EmitPop(r0);  // r0 contains value
@@ -3314,7 +3322,7 @@
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3324,7 +3332,7 @@
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3340,7 +3348,7 @@
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3353,7 +3361,7 @@
 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
 // It is not yet implemented on ARM, so it always goes to the slow case.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
   __ mov(r0, Operand(Factory::undefined_value()));
   frame_->EmitPush(r0);
@@ -3361,10 +3369,10 @@
 
 
 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
-  JumpTarget answer(this);
+  JumpTarget answer;
   // We need the CC bits to come out as not_equal in the case where the
   // object is a smi.  This can't be done with the usual test opcode so
   // we use XOR to get the right CC bits.
@@ -3383,7 +3391,7 @@
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 0);
 
   // Seed the result with the formal parameters count, which will be used
@@ -3398,7 +3406,7 @@
 
 
 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 1);
 
   // Satisfy contract with ArgumentsAccessStub:
@@ -3415,7 +3423,7 @@
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -3432,7 +3440,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   if (CheckForInlineRuntimeCall(node)) {
     ASSERT((has_cc() && frame_->height() == original_height) ||
            (!has_cc() && frame_->height() == original_height + 1));
@@ -3461,7 +3469,8 @@
 
   if (function == NULL) {
     // Call the JS runtime function.
-    Handle<Code> stub = ComputeCallInitialize(arg_count);
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
     frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
     __ ldr(cp, frame_->Context());
     frame_->Drop();
@@ -3479,7 +3488,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
@@ -3568,8 +3577,8 @@
 
       case Token::BIT_NOT: {
         // smi check
-        JumpTarget smi_label(this);
-        JumpTarget continue_label(this);
+        JumpTarget smi_label;
+        JumpTarget continue_label;
         __ tst(r0, Operand(kSmiTagMask));
         smi_label.Branch(eq);
 
@@ -3595,7 +3604,7 @@
 
       case Token::ADD: {
         // Smi check.
-        JumpTarget continue_label(this);
+        JumpTarget continue_label;
         __ tst(r0, Operand(kSmiTagMask));
         continue_label.Branch(eq);
         frame_->EmitPush(r0);
@@ -3620,7 +3629,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CountOperation");
 
   bool is_postfix = node->is_postfix();
@@ -3649,8 +3658,8 @@
     target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
     frame_->EmitPop(r0);
 
-    JumpTarget slow(this);
-    JumpTarget exit(this);
+    JumpTarget slow;
+    JumpTarget exit;
 
     // Load the value (1) into register r1.
     __ mov(r1, Operand(Smi::FromInt(1)));
@@ -3722,7 +3731,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ BinaryOperation");
   Token::Value op = node->op();
 
@@ -3739,7 +3748,7 @@
   // of compiling the binary operation is materialized or not.
 
   if (op == Token::AND) {
-    JumpTarget is_true(this);
+    JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
                           NOT_INSIDE_TYPEOF,
                           &is_true,
@@ -3757,8 +3766,8 @@
                             false);
 
     } else {
-      JumpTarget pop_and_continue(this);
-      JumpTarget exit(this);
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
 
       __ ldr(r0, frame_->Top());  // dup the stack top
       frame_->EmitPush(r0);
@@ -3781,7 +3790,7 @@
     }
 
   } else if (op == Token::OR) {
-    JumpTarget is_false(this);
+    JumpTarget is_false;
     LoadConditionAndSpill(node->left(),
                           NOT_INSIDE_TYPEOF,
                           true_target(),
@@ -3799,8 +3808,8 @@
                             false);
 
     } else {
-      JumpTarget pop_and_continue(this);
-      JumpTarget exit(this);
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
 
       __ ldr(r0, frame_->Top());
       frame_->EmitPush(r0);
@@ -3872,7 +3881,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   __ ldr(r0, frame_->Function());
   frame_->EmitPush(r0);
   ASSERT(frame_->height() == original_height + 1);
@@ -3883,7 +3892,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Get the expressions from the node.
@@ -4241,7 +4250,7 @@
       } else {
         ASSERT(!slot->var()->is_dynamic());
 
-        JumpTarget exit(cgen_);
+        JumpTarget exit;
         if (init_state == CONST_INIT) {
           ASSERT(slot->var()->mode() == Variable::CONST);
           // Only the first const initialization must be executed (the slot
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index c098acd..24033cb 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -28,7 +28,8 @@
 #ifndef V8_ARM_CODEGEN_ARM_H_
 #define V8_ARM_CODEGEN_ARM_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declarations
 class DeferredCode;
@@ -205,6 +206,8 @@
   JumpTarget* true_target() const  { return state_->true_target(); }
   JumpTarget* false_target() const  { return state_->false_target(); }
 
+  // We don't track loop nesting level on ARM yet.
+  int loop_nesting() const { return 0; }
 
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
@@ -317,8 +320,7 @@
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc);
-  Handle<Code> ComputeCallInitializeInLoop(int argc);
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 66c6a8d..99eab23 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -28,7 +28,8 @@
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_
 
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
 
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate ARM instructions.
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 7369661..71da1ec 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -34,7 +34,8 @@
 
 #include "cpu.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 void CPU::Setup() {
   // Nothing to do.
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index f86f981..bcfab6c 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -30,7 +30,8 @@
 #include "codegen-inl.h"
 #include "debug.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Currently debug break is not supported in frame exit code on ARM.
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 3b7474d..f56a599 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -62,7 +62,8 @@
 #include "platform.h"
 
 
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
 
 namespace v8i = v8::internal;
 
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index d26198a..6fde4b7 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -31,7 +31,8 @@
 #include "arm/assembler-arm-inl.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 StackFrame::Type StackFrame::ComputeType(State* state) {
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 9a18f3d..a67b18a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -28,7 +28,8 @@
 #ifndef V8_ARM_FRAMES_ARM_H_
 #define V8_ARM_FRAMES_ARM_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // The ARM ABI does not specify the usage of register r9, which may be reserved
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index b07c474..9b45c46 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -32,7 +32,8 @@
 #include "runtime.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // ----------------------------------------------------------------------------
@@ -211,7 +212,7 @@
 
   // Probe the stub cache.
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
   StubCache::GenerateProbe(masm, flags, r1, r2, r3);
 
   // If the stub cache probing failed, the receiver might be a value.
@@ -422,7 +423,9 @@
 
   __ ldr(r0, MemOperand(sp, 0));
   // Probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
   StubCache::GenerateProbe(masm, flags, r0, r2, r3);
 
   // Cache miss: Jump to runtime.
@@ -755,7 +758,9 @@
 
   // Get the receiver from the stack and probe the stub cache.
   __ ldr(r1, MemOperand(sp));
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
   StubCache::GenerateProbe(masm, flags, r1, r2, r3);
 
   // Cache miss: Jump to runtime.
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index b8b6323..ef07967 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -28,28 +28,30 @@
 #include "v8.h"
 
 #include "codegen-inl.h"
+#include "jump-target-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(cgen()->masm())
 
 void JumpTarget::DoJump() {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
   // Live non-frame registers are not allowed at unconditional jumps
   // because we have no way of invalidating the corresponding results
   // which are still live in the C++ code.
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
 
   if (is_bound()) {
     // Backward jump.  There is an expected frame to merge to.
     ASSERT(direction_ == BIDIRECTIONAL);
-    cgen_->frame()->MergeTo(entry_frame_);
-    cgen_->DeleteFrame();
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
     __ jmp(&entry_label_);
   } else {
     // Preconfigured entry frame is not used on ARM.
@@ -57,17 +59,16 @@
     // Forward jump.  The current frame is added to the end of the list
     // of frames reaching the target block and a jump to the merge code
     // is emitted.
-    AddReachingFrame(cgen_->frame());
+    AddReachingFrame(cgen()->frame());
     RegisterFile empty;
-    cgen_->SetFrame(NULL, &empty);
+    cgen()->SetFrame(NULL, &empty);
     __ jmp(&merge_labels_.last());
   }
 }
 
 
 void JumpTarget::DoBranch(Condition cc, Hint ignored) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
   if (is_bound()) {
     ASSERT(direction_ == BIDIRECTIONAL);
@@ -77,29 +78,29 @@
     // Swap the current frame for a copy (we do the swapping to get
     // the off-frame registers off the fall through) to use for the
     // branch.
-    VirtualFrame* fall_through_frame = cgen_->frame();
+    VirtualFrame* fall_through_frame = cgen()->frame();
     VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
     RegisterFile non_frame_registers = RegisterAllocator::Reserved();
-    cgen_->SetFrame(branch_frame, &non_frame_registers);
+    cgen()->SetFrame(branch_frame, &non_frame_registers);
 
     // Check if we can avoid merge code.
-    cgen_->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen_->frame()->Equals(entry_frame_)) {
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    if (cgen()->frame()->Equals(entry_frame_)) {
       // Branch right in to the block.
-      cgen_->DeleteFrame();
+      cgen()->DeleteFrame();
       __ b(cc, &entry_label_);
-      cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
       return;
     }
 
     // Check if we can reuse existing merge code.
     for (int i = 0; i < reaching_frames_.length(); i++) {
       if (reaching_frames_[i] != NULL &&
-          cgen_->frame()->Equals(reaching_frames_[i])) {
+          cgen()->frame()->Equals(reaching_frames_[i])) {
         // Branch to the merge code.
-        cgen_->DeleteFrame();
+        cgen()->DeleteFrame();
         __ b(cc, &merge_labels_[i]);
-        cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
         return;
       }
     }
@@ -108,10 +109,10 @@
     // around the merge code on the fall through path.
     Label original_fall_through;
     __ b(NegateCondition(cc), &original_fall_through);
-    cgen_->frame()->MergeTo(entry_frame_);
-    cgen_->DeleteFrame();
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
     __ b(&entry_label_);
-    cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
     __ bind(&original_fall_through);
 
   } else {
@@ -120,7 +121,7 @@
     // Forward branch.  A copy of the current frame is added to the end
     // of the list of frames reaching the target block and a branch to
     // the merge code is emitted.
-    AddReachingFrame(new VirtualFrame(cgen_->frame()));
+    AddReachingFrame(new VirtualFrame(cgen()->frame()));
     __ b(cc, &merge_labels_.last());
   }
 }
@@ -133,14 +134,13 @@
   // at the label (which should be the only one) is the spilled current
   // frame plus an in-memory return address.  The "fall-through" frame
   // at the return site is the spilled current frame.
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
   // There are no non-frame references across the call.
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   ASSERT(!is_linked());
 
-  cgen_->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+  cgen()->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
   target_frame->Adjust(1);
   // We do not expect a call with a preconfigured entry frame.
   ASSERT(entry_frame_ == NULL);
@@ -150,20 +150,19 @@
 
 
 void JumpTarget::DoBind(int mergable_elements) {
-  ASSERT(cgen_ != NULL);
   ASSERT(!is_bound());
 
   // Live non-frame registers are not allowed at the start of a basic
   // block.
-  ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
 
   if (direction_ == FORWARD_ONLY) {
     // A simple case: no forward jumps and no possible backward jumps.
     if (!is_linked()) {
       // The stack pointer can be floating above the top of the
       // virtual frame before the bind.  Afterward, it should not.
-      ASSERT(cgen_->has_valid_frame());
-      VirtualFrame* frame = cgen_->frame();
+      ASSERT(cgen()->has_valid_frame());
+      VirtualFrame* frame = cgen()->frame();
       int difference =
           frame->stack_pointer_ - (frame->elements_.length() - 1);
       if (difference > 0) {
@@ -176,12 +175,12 @@
 
     // Another simple case: no fall through, a single forward jump,
     // and no possible backward jumps.
-    if (!cgen_->has_valid_frame() && reaching_frames_.length() == 1) {
+    if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
       // Pick up the only reaching frame, take ownership of it, and
       // use it for the block about to be emitted.
       VirtualFrame* frame = reaching_frames_[0];
       RegisterFile reserved = RegisterAllocator::Reserved();
-      cgen_->SetFrame(frame, &reserved);
+      cgen()->SetFrame(frame, &reserved);
       reaching_frames_[0] = NULL;
       __ bind(&merge_labels_[0]);
 
@@ -201,11 +200,11 @@
   // If there is a current frame, record it as the fall-through.  It
   // is owned by the reaching frames for now.
   bool had_fall_through = false;
-  if (cgen_->has_valid_frame()) {
+  if (cgen()->has_valid_frame()) {
     had_fall_through = true;
-    AddReachingFrame(cgen_->frame());  // Return value ignored.
+    AddReachingFrame(cgen()->frame());  // Return value ignored.
     RegisterFile empty;
-    cgen_->SetFrame(NULL, &empty);
+    cgen()->SetFrame(NULL, &empty);
   }
 
   // Compute the frame to use for entry to the block.
@@ -242,17 +241,17 @@
           // binding site or as the fall through from a previous merge
           // code block.  Jump around the code we are about to
           // generate.
-          if (cgen_->has_valid_frame()) {
-            cgen_->DeleteFrame();
+          if (cgen()->has_valid_frame()) {
+            cgen()->DeleteFrame();
             __ b(&entry_label_);
           }
           // Pick up the frame for this block.  Assume ownership if
           // there cannot be backward jumps.
           RegisterFile reserved = RegisterAllocator::Reserved();
           if (direction_ == BIDIRECTIONAL) {
-            cgen_->SetFrame(new VirtualFrame(frame), &reserved);
+            cgen()->SetFrame(new VirtualFrame(frame), &reserved);
           } else {
-            cgen_->SetFrame(frame, &reserved);
+            cgen()->SetFrame(frame, &reserved);
             reaching_frames_[i] = NULL;
           }
           __ bind(&merge_labels_[i]);
@@ -261,7 +260,7 @@
           // looking for any that can share merge code with this one.
           for (int j = 0; j < i; j++) {
             VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen_->frame())) {
+            if (other != NULL && other->Equals(cgen()->frame())) {
               // Set the reaching frame element to null to avoid
               // processing it later, and then bind its entry label.
               reaching_frames_[j] = NULL;
@@ -270,13 +269,13 @@
           }
 
           // Emit the merge code.
-          cgen_->frame()->MergeTo(entry_frame_);
+          cgen()->frame()->MergeTo(entry_frame_);
         } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
           // If this is the fall through, and it didn't need merge
           // code, we need to pick up the frame so we can jump around
           // subsequent merge blocks if necessary.
           RegisterFile reserved = RegisterAllocator::Reserved();
-          cgen_->SetFrame(frame, &reserved);
+          cgen()->SetFrame(frame, &reserved);
           reaching_frames_[i] = NULL;
         }
       }
@@ -285,9 +284,9 @@
     // The code generator may not have a current frame if there was no
     // fall through and none of the reaching frames needed merging.
     // In that case, clone the entry frame as the current frame.
-    if (!cgen_->has_valid_frame()) {
+    if (!cgen()->has_valid_frame()) {
       RegisterFile reserved_registers = RegisterAllocator::Reserved();
-      cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
     }
 
     // There may be unprocessed reaching frames that did not need
@@ -313,9 +312,9 @@
     // Use a copy of the reaching frame so the original can be saved
     // for possible reuse as a backward merge block.
     RegisterFile reserved = RegisterAllocator::Reserved();
-    cgen_->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
     __ bind(&merge_labels_[0]);
-    cgen_->frame()->MergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
   }
 
   __ bind(&entry_label_);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index ee82da1..4e24063 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -32,7 +32,8 @@
 #include "debug.h"
 #include "runtime.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Give alias names to registers
 Register cp = {  8 };  // JavaScript context pointer
@@ -58,7 +59,10 @@
 // We do not support thumb inter-working with an arm architecture not supporting
 // the blx instruction (below v5t)
 #if defined(__THUMB_INTERWORK__)
-#if !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__)
+#if !defined(__ARM_ARCH_5T__) && \
+  !defined(__ARM_ARCH_5TE__) &&  \
+  !defined(__ARM_ARCH_7A__) &&   \
+  !defined(__ARM_ARCH_7__)
 // add tests for other versions above v5t as required
 #error "for thumb inter-working we require architecture v5t or above"
 #endif
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e336757..27eeab2 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -30,7 +30,8 @@
 
 #include "assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Give alias names to registers
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index bf07f0e..78ebc7e 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -30,7 +30,8 @@
 #include "regexp-macro-assembler.h"
 #include "arm/regexp-macro-assembler-arm.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() {
   UNIMPLEMENTED();
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 2f38bb7..de55183 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -28,7 +28,8 @@
 #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
  public:
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
index 5cef9c4..0d90129 100644
--- a/src/arm/register-allocator-arm.cc
+++ b/src/arm/register-allocator-arm.cc
@@ -30,7 +30,8 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Result implementation.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 9737e95..b8b6663 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -36,7 +36,8 @@
 #if !defined(__arm__)
 
 // Only build the simulator if not compiling for real ARM hardware.
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
 
 using ::v8::internal::Object;
 using ::v8::internal::PrintF;
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 2029fd3..d4a395a 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -66,7 +66,8 @@
 #include "constants-arm.h"
 
 
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
 
 class Simulator {
  public:
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 56afa02..3eb8269 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,8 @@
 #include "codegen-inl.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define __ ACCESS_MASM(masm)
 
@@ -61,7 +62,7 @@
 
   // Check that the flags match what we're looking for.
   __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
-  __ and_(offset, offset, Operand(~Code::kFlagsTypeMask));
+  __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
   __ cmp(offset, Operand(flags));
   __ b(ne, &miss);
 
@@ -494,7 +495,9 @@
 Object* CallStubCompiler::CompileCallField(Object* object,
                                            JSObject* holder,
                                            int index,
-                                           String* name) {
+                                           String* name,
+                                           Code::Flags flags) {
+  ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
   // ----------- S t a t e -------------
   //  -- lr: return address
   // -----------------------------------
@@ -538,14 +541,16 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(FIELD, name);
+  return GetCodeWithFlags(flags, name);
 }
 
 
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
-                                              CheckType check) {
+                                              CheckType check,
+                                              Code::Flags flags) {
+  ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
   // ----------- S t a t e -------------
   //  -- lr: return address
   // -----------------------------------
@@ -663,7 +668,7 @@
   if (function->shared()->name()->IsString()) {
     function_name = String::cast(function->shared()->name());
   }
-  return GetCode(CONSTANT_FUNCTION, function_name);
+  return GetCodeWithFlags(flags, function_name);
 }
 
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 6ddf06c..e51f963 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -31,28 +31,22 @@
 #include "register-allocator-inl.h"
 #include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
 
 
 // On entry to a function, the virtual frame already contains the
 // receiver and the parameters.  All initial frame elements are in
 // memory.
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
-    : cgen_(cgen),
-      masm_(cgen->masm()),
-      elements_(cgen->scope()->num_parameters()
-                + cgen->scope()->num_stack_slots()
-                + kPreallocatedElements),
-      parameter_count_(cgen->scope()->num_parameters()),
-      local_count_(0),
-      stack_pointer_(parameter_count_),  // 0-based index of TOS.
-      frame_pointer_(kIllegalIndex) {
-  for (int i = 0; i < parameter_count_ + 1; i++) {
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
     elements_.Add(FrameElement::MemoryElement());
   }
   for (int i = 0; i < kNumRegisters; i++) {
@@ -82,10 +76,10 @@
 
 
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm_, "[ Merge frame");
+  Comment cmnt(masm(), "[ Merge frame");
   // We should always be merging the code generator's current frame to an
   // expected frame.
-  ASSERT(cgen_->frame() == this);
+  ASSERT(cgen()->frame() == this);
 
   // Adjust the stack pointer upward (toward the top of the virtual
   // frame) if necessary.
@@ -152,7 +146,7 @@
 
 
 void VirtualFrame::Enter() {
-  Comment cmnt(masm_, "[ Enter JS frame");
+  Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
   // Verify that r1 contains a JS function.  The following code relies
@@ -175,15 +169,14 @@
   Adjust(4);
   __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
   // Adjust FP to point to saved FP.
-  frame_pointer_ = elements_.length() - 2;
   __ add(fp, sp, Operand(2 * kPointerSize));
-  cgen_->allocator()->Unuse(r1);
-  cgen_->allocator()->Unuse(lr);
+  cgen()->allocator()->Unuse(r1);
+  cgen()->allocator()->Unuse(lr);
 }
 
 
 void VirtualFrame::Exit() {
-  Comment cmnt(masm_, "[ Exit JS frame");
+  Comment cmnt(masm(), "[ Exit JS frame");
   // Drop the execution stack down to the frame pointer and restore the caller
   // frame pointer and return address.
   __ mov(sp, fp);
@@ -191,12 +184,11 @@
 }
 
 
-void VirtualFrame::AllocateStackSlots(int count) {
-  ASSERT(height() == 0);
-  local_count_ = count;
-  Adjust(count);
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
   if (count > 0) {
-    Comment cmnt(masm_, "[ Allocate space for locals");
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    Adjust(count);
       // Initialize stack slots with 'undefined' value.
     __ mov(ip, Operand(Factory::undefined_value()));
     for (int i = 0; i < count; i++) {
@@ -246,9 +238,9 @@
 
 
 Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallStub(stub);
-  Result result = cgen_->allocator()->Allocate(r0);
+  Result result = cgen()->allocator()->Allocate(r0);
   ASSERT(result.is_valid());
   return result;
 }
@@ -271,9 +263,9 @@
 
 Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
   PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(f, arg_count);
-  Result result = cgen_->allocator()->Allocate(r0);
+  Result result = cgen()->allocator()->Allocate(r0);
   ASSERT(result.is_valid());
   return result;
 }
@@ -281,9 +273,9 @@
 
 Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
   PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(id, arg_count);
-  Result result = cgen_->allocator()->Allocate(r0);
+  Result result = cgen()->allocator()->Allocate(r0);
   ASSERT(result.is_valid());
   return result;
 }
@@ -297,16 +289,16 @@
   PrepareForCall(arg_count, arg_count);
   arg_count_register->Unuse();
   __ InvokeBuiltin(id, flags);
-  Result result = cgen_->allocator()->Allocate(r0);
+  Result result = cgen()->allocator()->Allocate(r0);
   return result;
 }
 
 
 Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
                                        RelocInfo::Mode rmode) {
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ Call(code, rmode);
-  Result result = cgen_->allocator()->Allocate(r0);
+  Result result = cgen()->allocator()->Allocate(r0);
   ASSERT(result.is_valid());
   return result;
 }
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 24bc4f4..07e1be0 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -29,8 +29,10 @@
 #define V8_ARM_VIRTUAL_FRAME_ARM_H_
 
 #include "register-allocator.h"
+#include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Virtual frames
@@ -50,29 +52,39 @@
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
-    explicit SpilledScope(CodeGenerator* cgen);
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
 
-    ~SpilledScope();
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
 
    private:
-    CodeGenerator* cgen_;
     bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   };
 
   // An illegal index into the virtual frame.
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  explicit VirtualFrame(CodeGenerator* cgen);
+  VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
   explicit VirtualFrame(VirtualFrame* original);
 
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index);
 
   // The height of the virtual expression stack.
-  int height() const {
+  int height() {
     return elements_.length() - expression_base_index();
   }
 
@@ -95,7 +107,12 @@
 
   // Forget elements from the top of the frame to match an actual frame (eg,
   // the frame after a runtime call).  No code is emitted.
-  void Forget(int count);
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == elements_.length() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
 
   // Forget count elements from the top of the frame without adjusting
   // the stack pointer downward.  This is used, for example, before
@@ -106,7 +123,9 @@
   void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg);
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_index(reg));
+  }
 
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
@@ -128,7 +147,7 @@
   // registers.  Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -142,7 +161,7 @@
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -165,13 +184,13 @@
   void PrepareForReturn();
 
   // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots(int count);
+  void AllocateStackSlots();
 
   // The current top of the expression stack as an assembly operand.
-  MemOperand Top() const { return MemOperand(sp, 0); }
+  MemOperand Top() { return MemOperand(sp, 0); }
 
   // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) const {
+  MemOperand ElementAt(int index) {
     return MemOperand(sp, index * kPointerSize);
   }
 
@@ -190,9 +209,9 @@
   }
 
   // A frame-allocated local as an assembly operand.
-  MemOperand LocalAt(int index) const {
+  MemOperand LocalAt(int index) {
     ASSERT(0 <= index);
-    ASSERT(index < local_count_);
+    ASSERT(index < local_count());
     return MemOperand(fp, kLocal0Offset - index * kPointerSize);
   }
 
@@ -218,13 +237,13 @@
   void PushReceiverSlotAddress();
 
   // The function frame slot.
-  MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
+  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
 
   // Push the function on top of the frame.
   void PushFunction() { PushFrameSlotAt(function_index()); }
 
   // The context frame slot.
-  MemOperand Context() const { return MemOperand(fp, kContextOffset); }
+  MemOperand Context() { return MemOperand(fp, kContextOffset); }
 
   // Save the value of the esi register to the context frame slot.
   void SaveContextRegister();
@@ -234,10 +253,11 @@
   void RestoreContextRegister();
 
   // A parameter as an assembly operand.
-  MemOperand ParameterAt(int index) const {
+  MemOperand ParameterAt(int index) {
     // Index -1 corresponds to the receiver.
-    ASSERT(-1 <= index && index <= parameter_count_);
-    return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index <= parameter_count());
+    return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
   }
 
   // Push a copy of the value of a parameter frame slot on top of the frame.
@@ -259,14 +279,17 @@
   }
 
   // The receiver frame slot.
-  MemOperand Receiver() const { return ParameterAt(-1); }
+  MemOperand Receiver() { return ParameterAt(-1); }
 
   // Push a try-catch or try-finally handler on top of the virtual frame.
   void PushTryHandler(HandlerType type);
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  Result CallStub(CodeStub* stub, int arg_count);
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
 
   // Call stub that expects its argument in r0.  The argument is given
   // as a result which must be the register r0.
@@ -333,7 +356,15 @@
   void Push(Smi* value) { Push(Handle<Object>(value)); }
 
   // Pushing a result invalidates it (its contents become owned by the frame).
-  void Push(Result* result);
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg(), result->static_type());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
 
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
@@ -348,70 +379,69 @@
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  CodeGenerator* cgen_;
-  MacroAssembler* masm_;
-
   ZoneList<FrameElement> elements_;
 
-  // The number of frame-allocated locals and parameters respectively.
-  int parameter_count_;
-  int local_count_;
-
   // The index of the element that is at the processor's stack pointer
   // (the sp register).
   int stack_pointer_;
 
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register).
-  int frame_pointer_;
-
   // The index of the register frame element using each register, or
   // kIllegalIndex if a register is not on the frame.
   int register_locations_[kNumRegisters];
 
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the fp register).  The parameters, receiver, function, and context
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 3; }
+
   // The index of the first parameter.  The receiver lies below the first
   // parameter.
-  int param0_index() const { return 1; }
+  int param0_index() { return 1; }
 
-  // The index of the context slot in the frame.
-  int context_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ - 1;
-  }
+  // The index of the context slot in the frame.  It is immediately
+  // below the frame pointer.
+  int context_index() { return frame_pointer() - 1; }
 
-  // The index of the function slot in the frame.  It lies above the context
-  // slot.
-  int function_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ - 2;
-  }
+  // The index of the function slot in the frame.  It is below the frame
+  // pointer and context slot.
+  int function_index() { return frame_pointer() - 2; }
 
-  // The index of the first local.  Between the parameters and the locals
-  // lie the return address, the saved frame pointer, the context, and the
-  // function.
-  int local0_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 2;
-  }
+  // The index of the first local.  Between the frame pointer and the
+  // locals lies the return address.
+  int local0_index() { return frame_pointer() + 2; }
 
   // The index of the base of the expression stack.
-  int expression_base_index() const { return local0_index() + local_count_; }
+  int expression_base_index() { return local0_index() + local_count(); }
 
   // Convert a frame index into a frame pointer relative offset into the
   // actual stack.
-  int fp_relative(int index) const {
-    return (frame_pointer_ - index) * kPointerSize;
+  int fp_relative(int index) {
+    ASSERT(index < elements_.length());
+    ASSERT(frame_pointer() < elements_.length());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
   }
 
   // Record an occurrence of a register in the virtual frame.  This has the
   // effect of incrementing the register's external reference count and
   // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index);
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    register_locations_[reg.code()] = index;
+    cgen()->allocator()->Use(reg);
+  }
 
   // Record that a register reference has been dropped from the frame.  This
   // decrements the register's external reference count and invalidates the
   // index of the register's location in the frame.
-  void Unuse(Register reg);
+  void Unuse(Register reg) {
+    ASSERT(register_locations_[reg.code()] != kIllegalIndex);
+    register_locations_[reg.code()] = kIllegalIndex;
+    cgen()->allocator()->Unuse(reg);
+  }
 
   // Spill the element at a particular index---write it to memory if
   // necessary, free any associated register, and forget its value if
diff --git a/src/assembler.cc b/src/assembler.cc
index ec0e4fd..f7b50d9 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -43,7 +43,8 @@
 #include "stub-cache.h"
 #include "regexp-stack.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // -----------------------------------------------------------------------------
diff --git a/src/assembler.h b/src/assembler.h
index 8abdbc7..3449063 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -40,7 +40,8 @@
 #include "zone-inl.h"
 #include "token.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // -----------------------------------------------------------------------------
diff --git a/src/ast.cc b/src/ast.cc
index d19e3b3..eef8da7 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -31,7 +31,8 @@
 #include "scopes.h"
 #include "string-stream.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 VariableProxySentinel VariableProxySentinel::this_proxy_(true);
diff --git a/src/ast.h b/src/ast.h
index 6a2f671..80a4aa5 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -37,7 +37,8 @@
 #include "jsregexp.h"
 #include "jump-target.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The abstract syntax tree is an intermediate, light-weight
 // representation of the parsed JavaScript code suitable for
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 76bcc05..546490d 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -37,7 +37,8 @@
 #include "macro-assembler.h"
 #include "natives.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // A SourceCodeCache uses a FixedArray to store pairs of
 // (AsciiString*, JSFunction*), mapping names of native code files
@@ -1087,6 +1088,12 @@
     global_context()->set_empty_script(*script);
   }
 
+#ifdef V8_HOST_ARCH_64_BIT
+  // TODO(X64): Reenable remaining initialization when code generation works.
+  return true;
+#endif  // V8_HOST_ARCH_64_BIT
+
+
   if (FLAG_natives_file == NULL) {
     // Without natives file, install default natives.
     for (int i = Natives::GetDelayCount();
@@ -1523,8 +1530,8 @@
   current_  = this;
   result_ = NULL;
 
-  // If V8 hasn't been and cannot be initialized, just return.
-  if (!V8::HasBeenSetup() && !V8::Initialize(NULL)) return;
+  // If V8 isn't running and cannot be initialized, just return.
+  if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
 
   // Before creating the roots we must save the context and restore it
   // on all function exits.
@@ -1532,6 +1539,7 @@
   SaveContext context;
 
   CreateRoots(global_template, global_object);
+
   if (!InstallNatives()) return;
 
   MakeFunctionInstancePrototypeWritable();
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index e2883dc..0d743e3 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -29,7 +29,8 @@
 #ifndef V8_BOOTSTRAPPER_H_
 #define V8_BOOTSTRAPPER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
diff --git a/src/builtins.cc b/src/builtins.cc
index 3ddb681..1c43f7a 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -32,7 +32,8 @@
 #include "builtins.h"
 #include "ic-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // Support macros for defining builtins in C.
@@ -665,12 +666,12 @@
       Code::ComputeFlags(Code::BUILTIN)  \
     },
 
-#define DEF_FUNCTION_PTR_A(name, kind, state) \
-    { FUNCTION_ADDR(Generate_##name),         \
-      NULL,                                   \
-      #name,                                  \
-      name,                                   \
-      Code::ComputeFlags(Code::kind, state)   \
+#define DEF_FUNCTION_PTR_A(name, kind, state)              \
+    { FUNCTION_ADDR(Generate_##name),                      \
+      NULL,                                                \
+      #name,                                               \
+      name,                                                \
+      Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state)   \
     },
 
   // Define array of pointers to generators and C builtin functions.
diff --git a/src/builtins.h b/src/builtins.h
index c011f22..6e0f832 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -28,7 +28,8 @@
 #ifndef V8_BUILTINS_H_
 #define V8_BUILTINS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Define list of builtins implemented in C.
 #define BUILTIN_LIST_C(V)                          \
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index 94f37a8..bcb34c8 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -29,7 +29,8 @@
 #ifndef V8_BYTECODES_IRREGEXP_H_
 #define V8_BYTECODES_IRREGEXP_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 static const int BYTECODE_MASK = 0xff;
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index 217db9c..fadbc9a 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -30,7 +30,8 @@
 
 #include "char-predicates.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 inline bool IsCarriageReturn(uc32 c) {
diff --git a/src/char-predicates.h b/src/char-predicates.h
index 63e83b4..dac1eb8 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -28,7 +28,8 @@
 #ifndef V8_CHAR_PREDICATES_H_
 #define V8_CHAR_PREDICATES_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Unicode character predicates as defined by ECMA-262, 3rd,
 // used for lexical analysis.
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 06c4dcd..b14ede1 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -32,7 +32,8 @@
 #include "factory.h"
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Handle<Code> CodeStub::GetCode() {
   uint32_t key = GetKey();
@@ -58,7 +59,7 @@
     masm.GetCode(&desc);
 
     // Copy the generated code into a heap object, and store the major key.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB);
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
     Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
     code->set_major_key(MajorKey());
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 67634aa..183a64a 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,7 +28,8 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Stub is base classes of all stubs.
@@ -82,6 +83,10 @@
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
 
+  // The CallFunctionStub needs to override this so it can encode whether a
+  // lazily generated function should be fully optimized or not.
+  virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
+
   // Returns a name for logging/debugging purposes.
   virtual const char* GetName() { return MajorName(MajorKey()); }
 
diff --git a/src/code.h b/src/code.h
index 87e0794..072344b 100644
--- a/src/code.h
+++ b/src/code.h
@@ -28,7 +28,8 @@
 #ifndef V8_CODE_H_
 #define V8_CODE_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Wrapper class for passing expected and actual parameter counts as
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index 87bdb3b..f75b302 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -32,7 +32,8 @@
 #include "codegen.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 void DeferredCode::SetEntryFrame(Result* arg) {
diff --git a/src/codegen.cc b/src/codegen.cc
index 2c56534..51cc393 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -38,7 +38,8 @@
 #include "scopeinfo.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
@@ -47,8 +48,7 @@
 DeferredCode::DeferredCode(CodeGenerator* generator)
   : generator_(generator),
     masm_(generator->masm()),
-    enter_(generator),
-    exit_(generator, JumpTarget::BIDIRECTIONAL),
+    exit_(JumpTarget::BIDIRECTIONAL),
     statement_position_(masm_->current_statement_position()),
     position_(masm_->current_position()) {
   generator->AddDeferred(this);
@@ -170,8 +170,9 @@
   HistogramTimerScope timer(&Counters::code_creation);
   CodeDesc desc;
   cgen.masm()->GetCode(&desc);
-  ScopeInfo<> sinfo(flit->scope());
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
+  ZoneScopeInfo sinfo(flit->scope());
+  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
   Handle<Code> code = Factory::NewCode(desc,
                                        &sinfo,
                                        flags,
@@ -212,7 +213,7 @@
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
-  if (!Logger::is_enabled()) return false;
+  if (!Logger::IsEnabled()) return false;
   Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
   if (FLAG_log_regexp) {
     static Vector<const char> kRegexp = CStrVector("regexp");
@@ -323,17 +324,18 @@
 }
 
 
-Handle<Code> CodeGenerator::ComputeCallInitialize(int argc) {
-  CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc), Code);
-}
-
-
-Handle<Code> CodeGenerator::ComputeCallInitializeInLoop(int argc) {
-  // Force the creation of the corresponding stub outside loops,
-  // because it will be used when clearing the ICs later - when we
-  // don't know if we're inside a loop or not.
-  ComputeCallInitialize(argc);
-  CALL_HEAP_FUNCTION(StubCache::ComputeCallInitializeInLoop(argc), Code);
+Handle<Code> CodeGenerator::ComputeCallInitialize(
+    int argc,
+    InLoopFlag in_loop) {
+  if (in_loop == IN_LOOP) {
+    // Force the creation of the corresponding stub outside loops,
+    // because it may be used when clearing the ICs later - it is
+    // possible for a series of IC transitions to lose the in-loop
+    // information, and the IC clearing code can't generate a stub
+    // that it needs so we need to ensure it is generated already.
+    ComputeCallInitialize(argc, NOT_IN_LOOP);
+  }
+  CALL_HEAP_FUNCTION(StubCache::ComputeCallInitialize(argc, in_loop), Code);
 }
 
 
diff --git a/src/codegen.h b/src/codegen.h
index 6192796..487a7a4 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -88,7 +88,8 @@
 
 #include "register-allocator.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Code generation can be nested.  Code generation scopes form a stack
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index f9658e8..028ab61 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -29,7 +29,8 @@
 
 #include "compilation-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 enum {
   // The number of script generations tell how many GCs a script can
@@ -44,6 +45,12 @@
 };
 
 
+// Current enable state of the compilation cache.
+static bool enabled = true;
+static inline bool IsEnabled() {
+  return FLAG_compilation_cache && enabled;
+}
+
 // Keep separate tables for the different entry kinds.
 static Object* tables[NUMBER_OF_TABLE_ENTRIES] = { 0, };
 
@@ -138,6 +145,10 @@
                                                   Handle<Object> name,
                                                   int line_offset,
                                                   int column_offset) {
+  if (!IsEnabled()) {
+    return Handle<JSFunction>::null();
+  }
+
   // Use an int for the generation index, so value range propagation
   // in gcc 4.3+ won't assume it can only go up to LAST_ENTRY when in
   // fact it can go up to SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS.
@@ -164,7 +175,7 @@
     }
   }
 
-  // Once outside the menacles of the handle scope, we need to recheck
+  // Once outside the manacles of the handle scope, we need to recheck
   // to see if we actually found a cached script. If so, we return a
   // handle created in the caller's handle scope.
   if (result != NULL) {
@@ -185,6 +196,10 @@
 Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
                                                 Handle<Context> context,
                                                 Entry entry) {
+  if (!IsEnabled()) {
+    return Handle<JSFunction>::null();
+  }
+
   ASSERT(entry == EVAL_GLOBAL || entry == EVAL_CONTEXTUAL);
   Handle<JSFunction> result = Lookup(source, context, entry);
   if (result.is_null()) {
@@ -198,6 +213,10 @@
 
 Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
                                                   JSRegExp::Flags flags) {
+  if (!IsEnabled()) {
+    return Handle<FixedArray>::null();
+  }
+
   Handle<FixedArray> result = Lookup(source, flags);
   if (result.is_null()) {
     Counters::compilation_cache_misses.Increment();
@@ -210,6 +229,10 @@
 
 void CompilationCache::PutScript(Handle<String> source,
                                  Handle<JSFunction> boilerplate) {
+  if (!IsEnabled()) {
+    return;
+  }
+
   HandleScope scope;
   ASSERT(boilerplate->IsBoilerplate());
   Handle<CompilationCacheTable> table = GetTable(SCRIPT);
@@ -221,6 +244,10 @@
                                Handle<Context> context,
                                Entry entry,
                                Handle<JSFunction> boilerplate) {
+  if (!IsEnabled()) {
+    return;
+  }
+
   HandleScope scope;
   ASSERT(boilerplate->IsBoilerplate());
   Handle<CompilationCacheTable> table = GetTable(entry);
@@ -232,6 +259,10 @@
 void CompilationCache::PutRegExp(Handle<String> source,
                                  JSRegExp::Flags flags,
                                  Handle<FixedArray> data) {
+  if (!IsEnabled()) {
+    return;
+  }
+
   HandleScope scope;
   Handle<CompilationCacheTable> table = GetTable(REGEXP);
   CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
@@ -261,4 +292,15 @@
 }
 
 
+void CompilationCache::Enable() {
+  enabled = true;
+}
+
+
+void CompilationCache::Disable() {
+  enabled = false;
+  Clear();
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index b10b561..4545def 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -28,7 +28,8 @@
 #ifndef V8_COMPILATION_CACHE_H_
 #define V8_COMPILATION_CACHE_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // The compilation cache keeps function boilerplates for compiled
@@ -95,6 +96,11 @@
   // take place. This is used to retire entries from the cache to
   // avoid keeping them alive too long without using them.
   static void MarkCompactPrologue();
+
+  // Enable/disable compilation cache. Used by debugger to disable compilation
+  // cache during debugging to make sure new scripts are always compiled.
+  static void Enable();
+  static void Disable();
 };
 
 
diff --git a/src/compiler.cc b/src/compiler.cc
index a5d83fb..5632ff7 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -37,7 +37,8 @@
 #include "scopes.h"
 #include "usage-analyzer.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 static Handle<Code> MakeCode(FunctionLiteral* literal,
                              Handle<Script> script,
@@ -159,7 +160,7 @@
 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
   // Log the code generation for the script. Check explicit whether logging is
   // to avoid allocating when not required.
-  if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
+  if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
     if (script->name()->IsString()) {
       SmartPointer<char> data =
           String::cast(script->name())->ToCString(DISALLOW_NULLS);
@@ -267,7 +268,6 @@
 
 Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
                                          Handle<Context> context,
-                                         int line_offset,
                                          bool is_global,
                                          bool is_json) {
   int source_length = source->length();
@@ -287,7 +287,6 @@
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
     Handle<Script> script = Factory::NewScript(source);
-    script->set_line_offset(Smi::FromInt(line_offset));
     result = MakeFunction(is_global,
                           true,
                           is_json,
@@ -358,9 +357,9 @@
   // Log the code generation. If source information is available include script
   // name and line number. Check explicit whether logging is enabled as finding
   // the line number is not for free.
-  if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
-    Handle<String> func_name(lit->name()->length() > 0 ?
-                             *lit->name() : shared->inferred_name());
+  if (Logger::IsEnabled() || OProfileAgent::is_enabled()) {
+    Handle<String> func_name(name->length() > 0 ?
+                             *name : shared->inferred_name());
     if (script->name()->IsString()) {
       int line_num = GetScriptLineNumber(script, start_position);
       if (line_num > 0) {
diff --git a/src/compiler.h b/src/compiler.h
index 24d77ba..9f02a8d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -32,7 +32,8 @@
 #include "parser.h"
 #include "zone.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The V8 compiler
 //
@@ -61,7 +62,6 @@
   // Compile a String source within a context for Eval.
   static Handle<JSFunction> CompileEval(Handle<String> source,
                                         Handle<Context> context,
-                                        int line_offset,
                                         bool is_global,
                                         bool is_json);
 
diff --git a/src/contexts.cc b/src/contexts.cc
index 36b5488..873c23c 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -31,7 +31,8 @@
 #include "debug.h"
 #include "scopeinfo.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 JSBuiltinsObject* Context::builtins() {
   GlobalObject* object = global();
diff --git a/src/contexts.h b/src/contexts.h
index eb0b962..bdfc40b 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -28,7 +28,8 @@
 #ifndef V8_CONTEXTS_H_
 #define V8_CONTEXTS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 enum ContextLookupFlags {
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 64e4a79..8c875d7 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -38,7 +38,8 @@
 #include "conversions.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The fast double-to-int conversion routine does not guarantee
 // rounding towards zero.
diff --git a/src/conversions.cc b/src/conversions.cc
index 57a4568..7f63d9b 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -33,7 +33,8 @@
 #include "factory.h"
 #include "scanner.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 int HexValue(uc32 c) {
   if ('0' <= c && c <= '9')
diff --git a/src/conversions.h b/src/conversions.h
index 605327d..b6589cb 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,7 +28,8 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The fast double-to-int conversion routine does not guarantee
 // rounding towards zero.
diff --git a/src/counters.cc b/src/counters.cc
index bf9b8d8..239a5f7 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -30,7 +30,8 @@
 #include "counters.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 CounterLookupCallback StatsTable::lookup_function_ = NULL;
 CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
diff --git a/src/counters.h b/src/counters.h
index df1c70a..63be956 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -28,7 +28,8 @@
 #ifndef V8_COUNTERS_H_
 #define V8_COUNTERS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // StatsCounters is an interface for plugging into external
 // counters for monitoring.  Counters can be looked up and
diff --git a/src/cpu.h b/src/cpu.h
index d12c30c..ddc402f 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -36,7 +36,8 @@
 #ifndef V8_CPU_H_
 #define V8_CPU_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // CPU
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index c2dc531..3a091f9 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -280,7 +280,10 @@
   // Only get here if the exec failed.  Write errno to the parent to tell
   // them it went wrong.  If it went well the pipe is closed.
   int err = errno;
-  write(exec_error_fds[kWriteFD], &err, sizeof(err));
+  int bytes_written;
+  do {
+    bytes_written = write(exec_error_fds[kWriteFD], &err, sizeof(err));
+  } while (bytes_written == -1 && errno == EINTR);
   // Return (and exit child process).
 }
 
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 61a8c72..3d4161d 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -28,7 +28,8 @@
 #ifndef V8_DATEPARSER_INL_H_
 #define V8_DATEPARSER_INL_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 template <typename Char>
 bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
diff --git a/src/dateparser.cc b/src/dateparser.cc
index a1ae55d..1cc9aa1 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -29,7 +29,8 @@
 
 #include "dateparser.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 bool DateParser::DayComposer::Write(FixedArray* output) {
   int year = 0;  // Default year is 0 (=> 2000) for KJS compatibility.
diff --git a/src/dateparser.h b/src/dateparser.h
index 04d7e8b..d339a4f 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -30,7 +30,8 @@
 
 #include "scanner.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class DateParser : public AllStatic {
  public:
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 63f143a..62cc251 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -30,7 +30,8 @@
 #include "debug-agent.h"
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Public V8 debugger API message handler function. This function just delegates
 // to the debugger agent through it's data parameter.
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a3c6025..04f883f 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -32,7 +32,8 @@
 #include "../include/v8-debug.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward decelrations.
 class DebuggerAgentSession;
diff --git a/src/debug-delay.js b/src/debug-delay.js
index f196031..ea566a9 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -43,7 +43,8 @@
                      Exception: 2,
                      NewFunction: 3,
                      BeforeCompile: 4,
-                     AfterCompile: 5 };
+                     AfterCompile: 5,
+                     ScriptCollected: 6 };
 
 // Types of exceptions that can be broken upon.
 Debug.ExceptionBreak = { All : 0,
@@ -1015,6 +1016,37 @@
 };
 
 
+function MakeScriptCollectedEvent(exec_state, id) {
+  return new ScriptCollectedEvent(exec_state, id);
+}
+
+
+function ScriptCollectedEvent(exec_state, id) {
+  this.exec_state_ = exec_state;
+  this.id_ = id;
+}
+
+
+ScriptCollectedEvent.prototype.id = function() {
+  return this.id_;
+};
+
+
+ScriptCollectedEvent.prototype.executionState = function() {
+  return this.exec_state_;
+};
+
+
+ScriptCollectedEvent.prototype.toJSONProtocol = function() {
+  var o = new ProtocolMessage();
+  o.running = true;
+  o.event = "scriptCollected";
+  o.body = {};
+  o.body.script = { id: this.id() };
+  return o.toJSONProtocol();
+};
+
+
 function MakeScriptObject_(script, include_source) {
   var o = { id: script.id(),
             name: script.name(),
@@ -1139,7 +1171,7 @@
   try {
     try {
       // Convert the JSON string to an object.
-      request = %CompileString('(' + json_request + ')', 0, false)();
+      request = %CompileString('(' + json_request + ')', false)();
 
       // Create an initial response.
       response = this.createResponse(request);
diff --git a/src/debug.cc b/src/debug.cc
index 8422a67..becfaa6 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -31,6 +31,7 @@
 #include "arguments.h"
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
 #include "execution.h"
@@ -43,7 +44,8 @@
 
 #include "../include/v8-debug.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void PrintLn(v8::Local<v8::Value> value) {
@@ -425,6 +427,7 @@
 
 
 bool Debug::has_break_points_ = false;
+ScriptCache* Debug::script_cache_ = NULL;
 DebugInfoListNode* Debug::debug_info_list_ = NULL;
 
 
@@ -486,6 +489,96 @@
 Code* Debug::debug_break_return_ = NULL;
 
 
+void ScriptCache::Add(Handle<Script> script) {
+  // Create an entry in the hash map for the script.
+  int id = Smi::cast(script->id())->value();
+  HashMap::Entry* entry =
+      HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
+  if (entry->value != NULL) {
+    ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
+    return;
+  }
+
+  // Globalize the script object, make it weak and use the location of the
+  // global handle as the value in the hash map.
+  Handle<Script> script_ =
+      Handle<Script>::cast((GlobalHandles::Create(*script)));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
+                          this, ScriptCache::HandleWeakScript);
+  entry->value = script_.location();
+}
+
+
+Handle<FixedArray> ScriptCache::GetScripts() {
+  Handle<FixedArray> instances = Factory::NewFixedArray(occupancy());
+  int count = 0;
+  for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+    ASSERT(entry->value != NULL);
+    if (entry->value != NULL) {
+      instances->set(count, *reinterpret_cast<Script**>(entry->value));
+      count++;
+    }
+  }
+  return instances;
+}
+
+
+void ScriptCache::ProcessCollectedScripts() {
+  for (int i = 0; i < collected_scripts_.length(); i++) {
+    Debugger::OnScriptCollected(collected_scripts_[i]);
+  }
+  collected_scripts_.Clear();
+}
+
+
+void ScriptCache::Clear() {
+  // Iterate the script cache to get rid of all the weak handles.
+  for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
+    ASSERT(entry != NULL);
+    Object** location = reinterpret_cast<Object**>(entry->value);
+    ASSERT((*location)->IsScript());
+    GlobalHandles::ClearWeakness(location);
+    GlobalHandles::Destroy(location);
+  }
+  // Clear the content of the hash map.
+  HashMap::Clear();
+}
+
+
+void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
+  ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
+  // Find the location of the global handle.
+  Script** location =
+      reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
+  ASSERT((*location)->IsScript());
+
+  // Remove the entry from the cache.
+  int id = Smi::cast((*location)->id())->value();
+  script_cache->Remove(reinterpret_cast<void*>(id), Hash(id));
+  script_cache->collected_scripts_.Add(id);
+
+  // Clear the weak handle.
+  obj.Dispose();
+  obj.Clear();
+}
+
+
+void Debug::Setup(bool create_heap_objects) {
+  ThreadInit();
+  if (create_heap_objects) {
+    // Get code to handle entry to debug break on return.
+    debug_break_return_entry_ =
+        Builtins::builtin(Builtins::Return_DebugBreakEntry);
+    ASSERT(debug_break_return_entry_->IsCode());
+
+    // Get code to handle debug break on return.
+    debug_break_return_ =
+        Builtins::builtin(Builtins::Return_DebugBreak);
+    ASSERT(debug_break_return_->IsCode());
+  }
+}
+
+
 void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
   DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
   RemoveDebugInfo(node->debug_info());
@@ -512,22 +605,6 @@
 }
 
 
-void Debug::Setup(bool create_heap_objects) {
-  ThreadInit();
-  if (create_heap_objects) {
-    // Get code to handle entry to debug break on return.
-    debug_break_return_entry_ =
-        Builtins::builtin(Builtins::Return_DebugBreakEntry);
-    ASSERT(debug_break_return_entry_->IsCode());
-
-    // Get code to handle debug break on return.
-    debug_break_return_ =
-        Builtins::builtin(Builtins::Return_DebugBreak);
-    ASSERT(debug_break_return_->IsCode());
-  }
-}
-
-
 bool Debug::CompileDebuggerScript(int index) {
   HandleScope scope;
 
@@ -627,6 +704,7 @@
 
   // Debugger loaded.
   debug_context_ = Handle<Context>::cast(GlobalHandles::Create(*context));
+
   return true;
 }
 
@@ -637,6 +715,9 @@
     return;
   }
 
+  // Clear the script cache.
+  DestroyScriptCache();
+
   // Clear debugger context global handle.
   GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_context_.location()));
   debug_context_ = Handle<Context>();
@@ -1414,6 +1495,94 @@
 }
 
 
+// If an object given is an external string, check that the underlying
+// resource is accessible. For other kinds of objects, always return true.
+static bool IsExternalStringValid(Object* str) {
+  if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
+    return true;
+  }
+  if (String::cast(str)->IsAsciiRepresentation()) {
+    return ExternalAsciiString::cast(str)->resource() != NULL;
+  } else if (String::cast(str)->IsTwoByteRepresentation()) {
+    return ExternalTwoByteString::cast(str)->resource() != NULL;
+  } else {
+    return true;
+  }
+}
+
+
+void Debug::CreateScriptCache() {
+  HandleScope scope;
+
+  // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
+  // rid of all the cached script wrappers and the second gets rid of the
+  // scripts which is no longer referenced.
+  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage();
+
+  ASSERT(script_cache_ == NULL);
+  script_cache_ = new ScriptCache();
+
+  // Scan heap for Script objects.
+  int count = 0;
+  HeapIterator iterator;
+  while (iterator.has_next()) {
+    HeapObject* obj = iterator.next();
+    ASSERT(obj != NULL);
+    if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
+      script_cache_->Add(Handle<Script>(Script::cast(obj)));
+      count++;
+    }
+  }
+}
+
+
+void Debug::DestroyScriptCache() {
+  // Get rid of the script cache if it was created.
+  if (script_cache_ != NULL) {
+    delete script_cache_;
+    script_cache_ = NULL;
+  }
+}
+
+
+void Debug::AddScriptToScriptCache(Handle<Script> script) {
+  if (script_cache_ != NULL) {
+    script_cache_->Add(script);
+  }
+}
+
+
+Handle<FixedArray> Debug::GetLoadedScripts() {
+  // Create and fill the script cache when the loaded scripts is requested for
+  // the first time.
+  if (script_cache_ == NULL) {
+    CreateScriptCache();
+  }
+
+  // If the script cache is not active just return an empty array.
+  ASSERT(script_cache_ != NULL);
+  if (script_cache_ == NULL) {
+    return Factory::NewFixedArray(0);
+  }
+
+  // Perform GC to get unreferenced scripts evicted from the cache before
+  // returning the content.
+  Heap::CollectAllGarbage();
+
+  // Get the scripts from the cache.
+  return script_cache_->GetScripts();
+}
+
+
+void Debug::AfterGarbageCollection() {
+  // Generate events for collected scripts.
+  if (script_cache_ != NULL) {
+    script_cache_->ProcessCollectedScripts();
+  }
+}
+
+
 Mutex* Debugger::debugger_access_ = OS::CreateMutex();
 Handle<Object> Debugger::event_listener_ = Handle<Object>();
 Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
@@ -1421,7 +1590,7 @@
 bool Debugger::is_loading_debugger_ = false;
 bool Debugger::never_unload_debugger_ = false;
 v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
-bool Debugger::message_handler_cleared_ = false;
+bool Debugger::debugger_unload_pending_ = false;
 v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
 int Debugger::host_dispatch_micros_ = 100 * 1000;
 DebuggerAgent* Debugger::agent_ = NULL;
@@ -1518,6 +1687,21 @@
 }
 
 
+Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
+                                                  bool* caught_exception) {
+  // Create the script collected event object.
+  Handle<Object> exec_state = MakeExecutionState(caught_exception);
+  Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(), id_object.location() };
+
+  return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
+                      argc,
+                      argv,
+                      caught_exception);
+}
+
+
 void Debugger::OnException(Handle<Object> exception, bool uncaught) {
   HandleScope scope;
 
@@ -1624,12 +1808,15 @@
 void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
   HandleScope scope;
 
-  // No compile events while compiling natives.
-  if (compiling_natives()) return;
+  // Add the newly compiled script to the script cache.
+  Debug::AddScriptToScriptCache(script);
 
   // No more to do if not debugging.
   if (!IsDebuggerActive()) return;
 
+  // No compile events while compiling natives.
+  if (compiling_natives()) return;
+
   // Store whether in debugger before entering debugger.
   bool in_debugger = Debug::InDebugger();
 
@@ -1708,6 +1895,33 @@
 }
 
 
+void Debugger::OnScriptCollected(int id) {
+  HandleScope scope;
+
+  // No more to do if not debugging.
+  if (!IsDebuggerActive()) return;
+  if (!Debugger::EventActive(v8::ScriptCollected)) return;
+
+  // Enter the debugger.
+  EnterDebugger debugger;
+  if (debugger.FailedToEnter()) return;
+
+  // Create the script collected state object.
+  bool caught_exception = false;
+  Handle<Object> event_data = MakeScriptCollectedEvent(id,
+                                                       &caught_exception);
+  // Bail out and don't call debugger if exception.
+  if (caught_exception) {
+    return;
+  }
+
+  // Process debug event.
+  ProcessDebugEvent(v8::ScriptCollected,
+                    Handle<JSObject>::cast(event_data),
+                    true);
+}
+
+
 void Debugger::ProcessDebugEvent(v8::DebugEvent event,
                                  Handle<JSObject> event_data,
                                  bool auto_continue) {
@@ -1756,9 +1970,6 @@
       }
     }
   }
-
-  // Clear the mirror cache.
-  Debug::ClearMirrorCache();
 }
 
 
@@ -1771,8 +1982,8 @@
     Debug::Unload();
   }
 
-  // Clear the flag indicating that the message handler was recently cleared.
-  message_handler_cleared_ = false;
+  // Clear the flag indicating that the debugger should be unloaded.
+  debugger_unload_pending_ = false;
 }
 
 
@@ -1798,6 +2009,9 @@
     case v8::AfterCompile:
       sendEventMessage = true;
       break;
+    case v8::ScriptCollected:
+      sendEventMessage = true;
+      break;
     case v8::NewFunction:
       break;
     default:
@@ -1956,10 +2170,7 @@
     event_listener_data_ = Handle<Object>::cast(GlobalHandles::Create(*data));
   }
 
-  // Unload the debugger if event listener cleared.
-  if (callback->IsUndefined()) {
-    UnloadDebugger();
-  }
+  ListenersChanged();
 }
 
 
@@ -1967,10 +2178,8 @@
   ScopedLock with(debugger_access_);
 
   message_handler_ = handler;
+  ListenersChanged();
   if (handler == NULL) {
-    // Indicate that the message handler was recently cleared.
-    message_handler_cleared_ = true;
-
     // Send an empty command to the debugger if in a break to make JavaScript
     // run again if the debugger is closed.
     if (Debug::InDebugger()) {
@@ -1980,6 +2189,25 @@
 }
 
 
+void Debugger::ListenersChanged() {
+  if (IsDebuggerActive()) {
+    // Disable the compilation cache when the debugger is active.
+    CompilationCache::Disable();
+  } else {
+    CompilationCache::Enable();
+
+    // Unload the debugger if event listener and message handler cleared.
+    if (Debug::InDebugger()) {
+      // If we are in debugger set the flag to unload the debugger when last
+      // EnterDebugger on the current stack is destroyed.
+      debugger_unload_pending_ = true;
+    } else {
+      UnloadDebugger();
+    }
+  }
+}
+
+
 void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
                                       int period) {
   host_dispatch_handler_ = handler;
@@ -2172,7 +2400,14 @@
 
 
 v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
-  return v8::Utils::ToLocal(Debug::debugger_entry()->GetContext());
+  Handle<Context> context = Debug::debugger_entry()->GetContext();
+  // Top::context() may have been NULL when "script collected" event occurred.
+  if (*context == NULL) {
+    ASSERT(event_ == v8::ScriptCollected);
+    return v8::Local<v8::Context>();
+  }
+  Handle<Context> global_context(context->global_context());
+  return v8::Utils::ToLocal(global_context);
 }
 
 
diff --git a/src/debug.h b/src/debug.h
index 35336cb..3f90fa6 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -33,6 +33,7 @@
 #include "debug-agent.h"
 #include "execution.h"
 #include "factory.h"
+#include "hashmap.h"
 #include "platform.h"
 #include "string-stream.h"
 #include "v8threads.h"
@@ -40,7 +41,8 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #include "../include/v8-debug.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Forward declarations.
@@ -144,6 +146,42 @@
 };
 
 
+// Cache of all script objects in the heap. When a script is added a weak handle
+// to it is created and that weak handle is stored in the cache. The weak handle
+// callback takes care of removing the script from the cache. The key used in
+// the cache is the script id.
+class ScriptCache : private HashMap {
+ public:
+  ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+  virtual ~ScriptCache() { Clear(); }
+
+  // Add script to the cache.
+  void Add(Handle<Script> script);
+
+  // Return the scripts in the cache.
+  Handle<FixedArray> GetScripts();
+
+  // Generate debugger events for collected scripts.
+  void ProcessCollectedScripts();
+
+ private:
+  // Calculate the hash value from the key (script id).
+  static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
+
+  // Scripts match if their keys (script id) match.
+  static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
+
+  // Clear the cache releasing all the weak handles.
+  void Clear();
+
+  // Weak handle callback for scripts in the cache.
+  static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
+
+  // List used during GC to temporarily store id's of collected scripts.
+  List<int> collected_scripts_;
+};
+
+
 // Linked list holding debug info objects. The debug info objects are kept as
 // weak handles to avoid a debug info object to keep a function alive.
 class DebugInfoListNode {
@@ -230,9 +268,6 @@
   }
   static int break_id() { return thread_local_.break_id_; }
 
-
-
-
   static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
   static void HandleStepIn(Handle<JSFunction> function,
                            Address fp,
@@ -307,6 +342,15 @@
   // Mirror cache handling.
   static void ClearMirrorCache();
 
+  // Script cache handling.
+  static void CreateScriptCache();
+  static void DestroyScriptCache();
+  static void AddScriptToScriptCache(Handle<Script> script);
+  static Handle<FixedArray> GetLoadedScripts();
+
+  // Garbage collection notifications.
+  static void AfterGarbageCollection();
+
   // Code generation assumptions.
   static const int kIa32CallInstructionLength = 5;
   static const int kIa32JSReturnSequenceLength = 6;
@@ -343,6 +387,11 @@
 
   // Boolean state indicating whether any break points are set.
   static bool has_break_points_;
+
+  // Cache of all scripts in the heap.
+  static ScriptCache* script_cache_;
+
+  // List of active debug info objects.
   static DebugInfoListNode* debug_info_list_;
 
   static bool disable_break_;
@@ -532,12 +581,15 @@
   static Handle<Object> MakeCompileEvent(Handle<Script> script,
                                          bool before,
                                          bool* caught_exception);
+  static Handle<Object> MakeScriptCollectedEvent(int id,
+                                                 bool* caught_exception);
   static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
   static void OnException(Handle<Object> exception, bool uncaught);
   static void OnBeforeCompile(Handle<Script> script);
   static void OnAfterCompile(Handle<Script> script,
                            Handle<JSFunction> fun);
   static void OnNewFunction(Handle<JSFunction> fun);
+  static void OnScriptCollected(int id);
   static void ProcessDebugEvent(v8::DebugEvent event,
                                 Handle<JSObject> event_data,
                                 bool auto_continue);
@@ -578,7 +630,7 @@
     ScopedLock with(debugger_access_);
 
     // Check whether the message handler was been cleared.
-    if (message_handler_cleared_) {
+    if (debugger_unload_pending_) {
       UnloadDebugger();
     }
 
@@ -595,6 +647,7 @@
 
  private:
   static bool IsDebuggerActive();
+  static void ListenersChanged();
 
   static Mutex* debugger_access_;  // Mutex guarding debugger variables.
   static Handle<Object> event_listener_;  // Global handle to listener.
@@ -603,7 +656,7 @@
   static bool is_loading_debugger_;  // Are we loading the debugger?
   static bool never_unload_debugger_;  // Can we unload the debugger?
   static v8::Debug::MessageHandler2 message_handler_;
-  static bool message_handler_cleared_;  // Was message handler cleared?
+  static bool debugger_unload_pending_;  // Is there a pending unload of the debugger?
   static v8::Debug::HostDispatchHandler host_dispatch_handler_;
   static int host_dispatch_micros_;
 
@@ -670,8 +723,15 @@
       StackGuard::DebugCommand();
     }
 
-    // If leaving the debugger with the debugger no longer active unload it.
     if (prev_ == NULL) {
+      // Clear mirror cache when leaving the debugger. Skip this if there is a
+      // pending exception as clearing the mirror cache calls back into
+      // JavaScript. This can happen if the v8::Debug::Call is used in which
+      // case the exception should end up in the calling code.
+      if (!Top::has_pending_exception()) {
+        Debug::ClearMirrorCache();
+      }
+      // If leaving the debugger with the debugger no longer active unload it.
       if (!Debugger::IsDebuggerActive()) {
         Debugger::UnloadDebugger();
       }
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 47378e0..95022d0 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -36,7 +36,8 @@
 #include "serialize.h"
 #include "string-stream.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_DISASSEMBLER
 
diff --git a/src/disassembler.h b/src/disassembler.h
index 5003c00..68a338d 100644
--- a/src/disassembler.h
+++ b/src/disassembler.h
@@ -28,7 +28,8 @@
 #ifndef V8_DISASSEMBLER_H_
 #define V8_DISASSEMBLER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class Disassembler : public AllStatic {
  public:
diff --git a/src/execution.cc b/src/execution.cc
index 352131b..682cda6 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -43,7 +43,8 @@
 #include "debug.h"
 #include "v8threads.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 static Handle<Object> Invoke(bool construct,
diff --git a/src/execution.h b/src/execution.h
index 533043b..8cfdec2 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -28,7 +28,8 @@
 #ifndef V8_EXECUTION_H_
 #define V8_EXECUTION_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Flag used to set the interrupt causes.
diff --git a/src/factory.cc b/src/factory.cc
index 4b0b7f5..8b20407 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -33,7 +33,8 @@
 #include "factory.h"
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
@@ -509,8 +510,10 @@
 }
 
 
-Handle<Code> Factory::NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
-                              Code::Flags flags, Handle<Object> self_ref) {
+Handle<Code> Factory::NewCode(const CodeDesc& desc,
+                              ZoneScopeInfo* sinfo,
+                              Code::Flags flags,
+                              Handle<Object> self_ref) {
   CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code);
 }
 
diff --git a/src/factory.h b/src/factory.h
index 6ac2706..95dbee9 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -29,8 +29,10 @@
 #define V8_FACTORY_H_
 
 #include "heap.h"
+#include "zone-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Interface for handle based allocation.
@@ -202,8 +204,10 @@
       Handle<JSFunction> boilerplate,
       Handle<Context> context);
 
-  static Handle<Code> NewCode(const CodeDesc& desc, ScopeInfo<>* sinfo,
-                              Code::Flags flags, Handle<Object> self_reference);
+  static Handle<Code> NewCode(const CodeDesc& desc,
+                              ZoneScopeInfo* sinfo,
+                              Code::Flags flags,
+                              Handle<Object> self_reference);
 
   static Handle<Code> CopyCode(Handle<Code> code);
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index db49453..13e41e3 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -133,6 +133,9 @@
 DEFINE_int(min_preparse_length, 1024,
            "Minimum length for automatic enable preparsing")
 
+// compilation-cache.cc
+DEFINE_bool(compilation_cache, true, "enable compilation cache")
+
 // debug.cc
 DEFINE_bool(remote_debugging, false, "enable remote debugging")
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
@@ -333,6 +336,9 @@
             "Log statistical profiling information (implies --log-code).")
 DEFINE_bool(prof_auto, true,
             "Used with --prof, starts profiling automatically")
+DEFINE_bool(prof_lazy, false,
+            "Used with --prof, only does sampling and logging"
+            " when profiler is active (implies --noprof_auto).")
 DEFINE_bool(log_regexp, false, "Log regular expression execution.")
 DEFINE_bool(sliding_state_window, false,
             "Update sliding state window counters.")
diff --git a/src/flags.cc b/src/flags.cc
index 215b7c4..5df3afd 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -35,7 +35,8 @@
 #include "string-stream.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Define all of our flags.
 #define FLAG_MODE_DEFINE
diff --git a/src/flags.h b/src/flags.h
index e6cbe3c..a8eca95 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -29,7 +29,8 @@
 
 #include "checks.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Declare all of our flags.
 #define FLAG_MODE_DECLARE
diff --git a/src/frame-element.h b/src/frame-element.h
index 8bfafad..d16eb48 100644
--- a/src/frame-element.h
+++ b/src/frame-element.h
@@ -30,7 +30,8 @@
 
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Virtual frame elements
@@ -152,21 +153,27 @@
   }
 
   bool Equals(FrameElement other) {
-    if (value_ == other.value_) return true;
-
-    if (type() != other.type() ||
-        is_copied() != other.is_copied() ||
-        is_synced() != other.is_synced()) return false;
-
-    if (is_register()) {
-      if (!reg().is(other.reg())) return false;
-    } else if (is_constant()) {
-      if (!handle().is_identical_to(other.handle())) return false;
-    } else if (is_copy()) {
-      if (index() != other.index()) return false;
+    uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
+    if (!masked_difference) {
+      // The elements are equal if they agree exactly except on copied field.
+      return true;
+    } else {
+      // If two constants have the same value, and agree otherwise, return true.
+       return !(masked_difference & ~DataField::mask()) &&
+              is_constant() &&
+              handle().is_identical_to(other.handle());
     }
+  }
 
-    return true;
+  // Test if two FrameElements refer to the same memory or register location.
+  bool SameLocation(FrameElement* other) {
+    if (type() == other->type()) {
+      if (value_ == other->value_) return true;
+      if (is_constant() && handle().is_identical_to(other->handle())) {
+        return true;
+      }
+    }
+    return false;
   }
 
   // Given a pair of non-null frame element pointers, return one of them
@@ -176,14 +183,7 @@
     if (!is_valid()) return this;
     if (!other->is_valid()) return other;
 
-    // If they do not have the exact same location we reallocate.
-    bool not_same_location =
-        (type() != other->type()) ||
-        (is_register() && !reg().is(other->reg())) ||
-        (is_constant() && !handle().is_identical_to(other->handle())) ||
-        (is_copy() && index() != other->index());
-    if (not_same_location) return NULL;
-
+    if (!SameLocation(other)) return NULL;
     // If either is unsynced, the result is.  The result static type is
     // the merge of the static types.  It's safe to set it on one of the
     // frame elements, and harmless too (because we are only going to
diff --git a/src/frames-inl.h b/src/frames-inl.h
index bf46f6b..28be430 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -38,7 +38,8 @@
 #include "arm/frames-arm.h"
 #endif
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 inline Address StackHandler::address() const {
diff --git a/src/frames.cc b/src/frames.cc
index 1eedbf6..dd0ea00 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -34,7 +34,8 @@
 #include "top.h"
 #include "zone-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Iterator that supports traversing the stack handlers of a
 // particular frame. Needs to know the top of the handler chain.
diff --git a/src/frames.h b/src/frames.h
index 8ab4be9..a75befb 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -28,7 +28,8 @@
 #ifndef V8_FRAMES_H_
 #define V8_FRAMES_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 typedef uint32_t RegList;
 
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index 75f7a99..2d6a86a 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -30,7 +30,8 @@
 #include "ast.h"
 #include "func-name-inferrer.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index d8270c3..e88586a 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -28,47 +28,53 @@
 #ifndef V8_FUNC_NAME_INFERRER_H_
 #define V8_FUNC_NAME_INFERRER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // FuncNameInferrer is a stateful class that is used to perform name
 // inference for anonymous functions during static analysis of source code.
 // Inference is performed in cases when an anonymous function is assigned
 // to a variable or a property (see test-func-name-inference.cc for examples.)
-
+//
 // The basic idea is that during AST traversal LHSs of expressions are
 // always visited before RHSs. Thus, during visiting the LHS, a name can be
 // collected, and during visiting the RHS, a function literal can be collected.
 // Inference is performed while leaving the assignment node.
-
 class FuncNameInferrer BASE_EMBEDDED {
  public:
-  FuncNameInferrer() :
-      entries_stack_(10),
-      names_stack_(5),
-      funcs_to_infer_(4),
-      dot_(Factory::NewStringFromAscii(CStrVector("."))) {
+  FuncNameInferrer()
+      : entries_stack_(10),
+        names_stack_(5),
+        funcs_to_infer_(4),
+        dot_(Factory::NewStringFromAscii(CStrVector("."))) {
   }
 
+  // Returns whether we have entered name collection state.
   bool IsOpen() const { return !entries_stack_.is_empty(); }
 
+  // Pushes an enclosing the name of enclosing function onto names stack.
   void PushEnclosingName(Handle<String> name);
 
+  // Enters name collection state.
   void Enter() {
     entries_stack_.Add(names_stack_.length());
   }
 
+  // Pushes an encountered name onto names stack when in collection state.
   void PushName(Handle<String> name) {
     if (IsOpen()) {
       names_stack_.Add(name);
     }
   }
 
+  // Adds a function to infer name for.
   void AddFunction(FunctionLiteral* func_to_infer) {
     if (IsOpen()) {
       funcs_to_infer_.Add(func_to_infer);
     }
   }
 
+  // Infers a function name and leaves names collection state.
   void InferAndLeave() {
     ASSERT(IsOpen());
     if (!funcs_to_infer_.is_empty()) {
@@ -78,13 +84,18 @@
   }
 
  private:
+  // Constructs a full name in dotted notation from gathered names.
   Handle<String> MakeNameFromStack();
+
+  // A helper function for MakeNameFromStack.
   Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
+
+  // Performs name inferring for added functions.
   void InferFunctionsNames();
 
-  List<int> entries_stack_;
-  List<Handle<String> > names_stack_;
-  List<FunctionLiteral*> funcs_to_infer_;
+  ZoneList<int> entries_stack_;
+  ZoneList<Handle<String> > names_stack_;
+  ZoneList<FunctionLiteral*> funcs_to_infer_;
   Handle<String> dot_;
 
   DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
@@ -95,15 +106,17 @@
 // leaving scope.
 class ScopedFuncNameInferrer BASE_EMBEDDED {
  public:
-  explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer) :
-      inferrer_(inferrer),
-      is_entered_(false) {}
+  explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
+      : inferrer_(inferrer),
+        is_entered_(false) {}
+
   ~ScopedFuncNameInferrer() {
     if (is_entered_) {
       inferrer_->InferAndLeave();
     }
   }
 
+  // Triggers the wrapped inferrer into name collection state.
   void Enter() {
     inferrer_->Enter();
     is_entered_ = true;
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 46b7db3..ed4e262 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -30,7 +30,8 @@
 #include "api.h"
 #include "global-handles.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class GlobalHandles::Node : public Malloced {
  public:
diff --git a/src/global-handles.h b/src/global-handles.h
index e6e9de1..9e63ba7 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -30,7 +30,8 @@
 
 #include "list-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Structure for tracking global handles.
 // A single list keeps all the allocated global handles.
diff --git a/src/globals.h b/src/globals.h
index e0c5278..6ac59b6 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,8 @@
 #ifndef V8_GLOBALS_H_
 #define V8_GLOBALS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Processor architecture detection.  For more info on what's defined, see:
 //   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
@@ -123,9 +124,12 @@
 #endif
 
 const int kObjectAlignmentBits = kPointerSizeLog2;
-const intptr_t kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
+const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
 
+// Desired alignment for pointers.
+const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
 
 // Tag information for HeapObject.
 const int kHeapObjectTag = 1;
@@ -239,6 +243,7 @@
 class TickSample;
 class VirtualMemory;
 class Mutex;
+class ZoneScopeInfo;
 
 typedef bool (*WeakSlotCallback)(Object** pointer);
 
@@ -321,8 +326,6 @@
 enum InlineCacheState {
   // Has never been executed.
   UNINITIALIZED,
-  // Has never been executed, but is in a loop.
-  UNINITIALIZED_IN_LOOP,
   // Has been executed but monomorhic state has been delayed.
   PREMONOMORPHIC,
   // Has been executed and only one receiver type has been seen.
@@ -337,6 +340,12 @@
 };
 
 
+enum InLoopFlag {
+  NOT_IN_LOOP,
+  IN_LOOP
+};
+
+
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
@@ -418,7 +427,11 @@
 
 // OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
 #define OBJECT_SIZE_ALIGN(value)                                \
-  ((value + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+
+// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
+#define POINTER_SIZE_ALIGN(value)                               \
+  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
 
 // The expression OFFSET_OF(type, field) computes the byte-offset
 // of the specified field relative to the containing type. This
diff --git a/src/handles-inl.h b/src/handles-inl.h
index e5899e3..6013c5b 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -33,7 +33,8 @@
 #include "handles.h"
 #include "api.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 template<class T>
 Handle<T>::Handle(T* obj) {
diff --git a/src/handles.cc b/src/handles.cc
index 2dd71cf..0b9fc0f 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -37,7 +37,8 @@
 #include "natives.h"
 #include "runtime.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
diff --git a/src/handles.h b/src/handles.h
index cf5ed56..306b016 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -30,7 +30,8 @@
 
 #include "apiutils.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // A Handle provides a reference to an object that survives relocation by
diff --git a/src/hashmap.cc b/src/hashmap.cc
index 79cf43d..7b6b90a 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -29,7 +29,8 @@
 
 #include "hashmap.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Allocator HashMap::DefaultAllocator;
 
diff --git a/src/hashmap.h b/src/hashmap.h
index c9cadb2..b92c715 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -28,7 +28,8 @@
 #ifndef V8_HASHMAP_H_
 #define V8_HASHMAP_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Allocator defines the memory allocator interface
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3d734e9..8dd09d7 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -31,7 +31,8 @@
 #include "log.h"
 #include "v8-counters.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 int Heap::MaxHeapObjectSize() {
   return Page::kMaxHeapObjectSize;
@@ -145,7 +146,9 @@
   if (new_space_.Contains(address)) return;
   ASSERT(!new_space_.FromSpaceContains(address));
   SLOW_ASSERT(Contains(address + offset));
+#ifndef V8_HOST_ARCH_64_BIT
   Page::SetRSet(address, offset);
+#endif  // V8_HOST_ARCH_64_BIT
 }
 
 
diff --git a/src/heap.cc b/src/heap.cc
index a5b7b30..772cf32 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -40,7 +40,8 @@
 #include "scopeinfo.h"
 #include "v8threads.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define ROOT_ALLOCATION(type, name) type* Heap::name##_;
   ROOT_LIST(ROOT_ALLOCATION)
@@ -283,6 +284,9 @@
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   ReportStatisticsAfterGC();
 #endif
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Debug::AfterGarbageCollection();
+#endif
 }
 
 
@@ -664,11 +668,33 @@
   // Copy objects reachable from weak pointers.
   GlobalHandles::IterateWeakRoots(&scavenge_visitor);
 
+#if V8_HOST_ARCH_64_BIT
+  // TODO(X64): Make this go away again. We currently disable RSets for
+  // 64-bit-mode.
+  HeapObjectIterator old_pointer_iterator(old_pointer_space_);
+  while (old_pointer_iterator.has_next()) {
+    HeapObject* heap_object = old_pointer_iterator.next();
+    heap_object->Iterate(&scavenge_visitor);
+  }
+  HeapObjectIterator map_iterator(map_space_);
+  while (map_iterator.has_next()) {
+    HeapObject* heap_object = map_iterator.next();
+    heap_object->Iterate(&scavenge_visitor);
+  }
+  LargeObjectIterator lo_iterator(lo_space_);
+  while (lo_iterator.has_next()) {
+    HeapObject* heap_object = lo_iterator.next();
+    if (heap_object->IsFixedArray()) {
+      heap_object->Iterate(&scavenge_visitor);
+    }
+  }
+#else  // V8_HOST_ARCH_64_BIT
   // Copy objects reachable from the old generation.  By definition,
   // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
+#endif   // V8_HOST_ARCH_64_BIT
 
   do {
     ASSERT(new_space_front <= new_space_.top());
@@ -775,6 +801,8 @@
 
 
 int Heap::UpdateRSet(HeapObject* obj) {
+#ifndef V8_HOST_ARCH_64_BIT
+  // TODO(X64) Reenable RSet when we have a working 64-bit layout of Page.
   ASSERT(!InNewSpace(obj));
   // Special handling of fixed arrays to iterate the body based on the start
   // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
@@ -796,6 +824,7 @@
     UpdateRSetVisitor v;
     obj->Iterate(&v);
   }
+#endif  // V8_HOST_ARCH_64_BIT
   return obj->Size();
 }
 
@@ -996,7 +1025,7 @@
   meta_map_ = reinterpret_cast<Map*>(obj);
   meta_map()->set_map(meta_map());
 
-  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, Array::kHeaderSize);
+  obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
   if (obj->IsFailure()) return false;
   fixed_array_map_ = Map::cast(obj);
 
@@ -1053,37 +1082,37 @@
   STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
 #undef ALLOCATE_STRING_MAP
 
-  obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+  obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_short_string_map_ = Map::cast(obj);
   undetectable_short_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+  obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_medium_string_map_ = Map::cast(obj);
   undetectable_medium_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize);
+  obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_long_string_map_ = Map::cast(obj);
   undetectable_long_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+  obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_short_ascii_string_map_ = Map::cast(obj);
   undetectable_short_ascii_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+  obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_medium_ascii_string_map_ = Map::cast(obj);
   undetectable_medium_ascii_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
+  obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
   if (obj->IsFailure()) return false;
   undetectable_long_ascii_string_map_ = Map::cast(obj);
   undetectable_long_ascii_string_map_->set_is_undetectable();
 
-  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kHeaderSize);
+  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
   if (obj->IsFailure()) return false;
   byte_array_map_ = Map::cast(obj);
 
@@ -1699,7 +1728,7 @@
 
 
 Object* Heap::CreateCode(const CodeDesc& desc,
-                         ScopeInfo<>* sinfo,
+                         ZoneScopeInfo* sinfo,
                          Code::Flags flags,
                          Handle<Object> self_reference) {
   // Compute size
@@ -2635,12 +2664,13 @@
 #endif  // DEBUG
 
 
-void Heap::IterateRSetRange(Address object_start,
-                            Address object_end,
-                            Address rset_start,
-                            ObjectSlotCallback copy_object_func) {
+int Heap::IterateRSetRange(Address object_start,
+                           Address object_end,
+                           Address rset_start,
+                           ObjectSlotCallback copy_object_func) {
   Address object_address = object_start;
   Address rset_address = rset_start;
+  int set_bits_count = 0;
 
   // Loop over all the pointers in [object_start, object_end).
   while (object_address < object_end) {
@@ -2657,6 +2687,7 @@
           // If this pointer does not need to be remembered anymore, clear
           // the remembered set bit.
           if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
+          set_bits_count++;
         }
         object_address += kPointerSize;
       }
@@ -2670,6 +2701,7 @@
     }
     rset_address += kIntSize;
   }
+  return set_bits_count;
 }
 
 
@@ -2677,11 +2709,20 @@
   ASSERT(Page::is_rset_in_use());
   ASSERT(space == old_pointer_space_ || space == map_space_);
 
+  static void* paged_rset_histogram = StatsTable::CreateHistogram(
+      "V8.RSetPaged",
+      0,
+      Page::kObjectAreaSize / kPointerSize,
+      30);
+
   PageIterator it(space, PageIterator::PAGES_IN_USE);
   while (it.has_next()) {
     Page* page = it.next();
-    IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
-                     page->RSetStart(), copy_object_func);
+    int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
+                                 page->RSetStart(), copy_object_func);
+    if (paged_rset_histogram != NULL) {
+      StatsTable::AddHistogramSample(paged_rset_histogram, count);
+    }
   }
 }
 
diff --git a/src/heap.h b/src/heap.h
index ccc552f..d8080b6 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -28,7 +28,10 @@
 #ifndef V8_HEAP_H_
 #define V8_HEAP_H_
 
-namespace v8 { namespace internal {
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
 
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                             \
@@ -570,7 +573,7 @@
   // object by containing this pointer.
   // Please note this function does not perform a garbage collection.
   static Object* CreateCode(const CodeDesc& desc,
-                            ScopeInfo<>* sinfo,
+                            ZoneScopeInfo* sinfo,
                             Code::Flags flags,
                             Handle<Object> self_reference);
 
@@ -664,10 +667,11 @@
   // Iterates a range of remembered set addresses starting with rset_start
   // corresponding to the range of allocated pointers
   // [object_start, object_end).
-  static void IterateRSetRange(Address object_start,
-                               Address object_end,
-                               Address rset_start,
-                               ObjectSlotCallback copy_object_func);
+  // Returns the number of bits that were set.
+  static int IterateRSetRange(Address object_start,
+                              Address object_end,
+                              Address rset_start,
+                              ObjectSlotCallback copy_object_func);
 
   // Returns whether the object resides in new space.
   static inline bool InNewSpace(Object* object);
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index c1edd32..045f176 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -39,7 +39,8 @@
 
 #include "cpu.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Condition NegateCondition(Condition cc) {
   return static_cast<Condition>(cc ^ 1);
@@ -277,6 +278,22 @@
 }
 
 
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+  ASSERT(len_ == 1);
+  ASSERT((scale & -4) == 0);
+  // Use SIB with no index register only for base esp.
+  ASSERT(!index.is(esp) || base.is(esp));
+  buf_[1] = scale << 6 | index.code() << 3 | base.code();
+  len_ = 2;
+}
+
+
+void Operand::set_disp8(int8_t disp) {
+  ASSERT(len_ == 1 || len_ == 2);
+  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
+}
+
+
 void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
   ASSERT(len_ == 1 || len_ == 2);
   int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 3a2d3f8..3628975 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -40,7 +40,8 @@
 #include "macro-assembler.h"
 #include "serialize.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -----------------------------------------------------------------------------
 // Implementation of Register
@@ -256,20 +257,6 @@
 }
 
 
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
-  ASSERT(len_ == 1);
-  ASSERT((scale & -4) == 0);
-  buf_[1] = scale << 6 | index.code() << 3 | base.code();
-  len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
-  ASSERT(len_ == 1 || len_ == 2);
-  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
 bool Operand::is_reg(Register reg) const {
   return ((buf_[0] & 0xF8) == 0xC0)  // addressing mode is register only.
       && ((buf_[0] & 0x07) == reg.code());  // register codes match.
@@ -288,7 +275,7 @@
 #endif
 
 // spare_buffer_
-static byte* spare_buffer_ = NULL;
+byte* Assembler::spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4c99588..c1260d9 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -37,7 +37,8 @@
 #ifndef V8_IA32_ASSEMBLER_IA32_H_
 #define V8_IA32_ASSEMBLER_IA32_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // CPU Registers.
 //
@@ -815,6 +816,8 @@
   int buffer_size_;
   // True if the assembler owns the buffer, false if buffer is external.
   bool own_buffer_;
+  // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+  static byte* spare_buffer_;
 
   // code generation
   byte* pc_;  // the program counter; moves forward
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 8fbe634..f65074b 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -29,7 +29,8 @@
 
 #include "codegen-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 #define __ ACCESS_MASM(masm)
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index fa8e661..5dec412 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -36,7 +36,8 @@
 #include "runtime.h"
 #include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define __ ACCESS_MASM(masm_)
 
@@ -107,7 +108,7 @@
   RegisterAllocator register_allocator(this);
   allocator_ = &register_allocator;
   ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame(this);
+  frame_ = new VirtualFrame();
   set_in_spilled_code(false);
 
   // Adjust for function-level loop nesting.
@@ -138,10 +139,10 @@
     frame_->Enter();
 
     // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots(scope_->num_stack_slots());
+    frame_->AllocateStackSlots();
     // Initialize the function return target after the locals are set
     // up, because it needs the expected frame height from the frame.
-    function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
     function_return_is_shadowed_ = false;
 
     // Allocate the arguments object and copy the parameters into it.
@@ -468,14 +469,14 @@
   int original_height = frame_->height();
 #endif
   ASSERT(!in_spilled_code());
-  JumpTarget true_target(this);
-  JumpTarget false_target(this);
+  JumpTarget true_target;
+  JumpTarget false_target;
   ControlDestination dest(&true_target, &false_target, true);
   LoadCondition(x, typeof_state, &dest, false);
 
   if (dest.false_was_fall_through()) {
     // The false target was just bound.
-    JumpTarget loaded(this);
+    JumpTarget loaded;
     frame_->Push(Factory::false_value());
     // There may be dangling jumps to the true target.
     if (true_target.is_linked()) {
@@ -488,7 +489,7 @@
   } else if (dest.is_used()) {
     // There is true, and possibly false, control flow (with true as
     // the fall through).
-    JumpTarget loaded(this);
+    JumpTarget loaded;
     frame_->Push(Factory::true_value());
     if (false_target.is_linked()) {
       loaded.Jump();
@@ -504,7 +505,7 @@
     // short-circuited boolean operators).
     ASSERT(has_valid_frame());
     if (true_target.is_linked() || false_target.is_linked()) {
-      JumpTarget loaded(this);
+      JumpTarget loaded;
       loaded.Jump();  // Don't lose the current TOS.
       if (true_target.is_linked()) {
         true_target.Bind();
@@ -1541,7 +1542,7 @@
       // where both sides are Smis.
       left_side.ToRegister();
       ASSERT(left_side.is_valid());
-      JumpTarget is_smi(this);
+      JumpTarget is_smi;
       __ test(left_side.reg(), Immediate(kSmiTagMask));
       is_smi.Branch(zero, &left_side, &right_side, taken);
 
@@ -1612,7 +1613,7 @@
         (right_side.is_constant() && !right_side.handle()->IsSmi());
     left_side.ToRegister();
     right_side.ToRegister();
-    JumpTarget is_smi(this);
+    JumpTarget is_smi;
     if (!known_non_smi) {
       // Check for the smi case.
       Result temp = allocator_->Allocate();
@@ -1652,12 +1653,14 @@
 
 class CallFunctionStub: public CodeStub {
  public:
-  explicit CallFunctionStub(int argc) : argc_(argc) { }
+  CallFunctionStub(int argc, InLoopFlag in_loop)
+      : argc_(argc), in_loop_(in_loop) { }
 
   void Generate(MacroAssembler* masm);
 
  private:
   int argc_;
+  InLoopFlag in_loop_;
 
 #ifdef DEBUG
   void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
@@ -1665,6 +1668,7 @@
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() { return argc_; }
+  InLoopFlag InLoop() { return in_loop_; }
 };
 
 
@@ -1682,7 +1686,8 @@
   CodeForSourcePosition(position);
 
   // Use the shared code stub to call the function.
-  CallFunctionStub call_function(arg_count);
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
   Result answer = frame_->CallStub(&call_function, arg_count + 1);
   // Restore context and replace function on the stack with the
   // result of the stub invocation.
@@ -1757,7 +1762,7 @@
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ Block");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   VisitStatements(node->statements());
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
@@ -1871,10 +1876,10 @@
   bool has_else_stm = node->HasElseStatement();
 
   CodeForStatementPosition(node);
-  JumpTarget exit(this);
+  JumpTarget exit;
   if (has_then_stm && has_else_stm) {
-    JumpTarget then(this);
-    JumpTarget else_(this);
+    JumpTarget then;
+    JumpTarget else_;
     ControlDestination dest(&then, &else_, true);
     LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
 
@@ -1901,7 +1906,7 @@
 
   } else if (has_then_stm) {
     ASSERT(!has_else_stm);
-    JumpTarget then(this);
+    JumpTarget then;
     ControlDestination dest(&then, &exit, true);
     LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
 
@@ -1921,7 +1926,7 @@
 
   } else if (has_else_stm) {
     ASSERT(!has_then_stm);
-    JumpTarget else_(this);
+    JumpTarget else_;
     ControlDestination dest(&exit, &else_, false);
     LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
 
@@ -2088,8 +2093,8 @@
   // placeholders, and fill in the addresses after the labels have been
   // bound.
 
-  JumpTarget setup_default(this);
-  JumpTarget is_smi(this);
+  JumpTarget setup_default;
+  JumpTarget is_smi;
 
   // A non-null default label pointer indicates a default case among
   // the case labels.  Otherwise we use the break target as a
@@ -2134,7 +2139,7 @@
   // frame of the correct height can be merged to).  Keep a copy to
   // restore at the start of every label.  Create a jump target and
   // bind it to set its entry frame properly.
-  JumpTarget entry_target(this, JumpTarget::BIDIRECTIONAL);
+  JumpTarget entry_target(JumpTarget::BIDIRECTIONAL);
   entry_target.Bind(&smi_value);
   VirtualFrame* start_frame = new VirtualFrame(frame_);
 
@@ -2191,7 +2196,7 @@
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ SwitchStatement");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   // Compile the switch value.
   Load(node->tag());
@@ -2204,7 +2209,7 @@
   int length = cases->length();
   CaseClause* default_clause = NULL;
 
-  JumpTarget next_test(this);
+  JumpTarget next_test;
   // Compile the case label expressions and comparisons.  Exit early
   // if a comparison is unconditionally true.  The target next_test is
   // bound before the loop in order to indicate control flow to the
@@ -2212,7 +2217,6 @@
   next_test.Bind();
   for (int i = 0; i < length && !next_test.is_unused(); i++) {
     CaseClause* clause = cases->at(i);
-    clause->body_target()->Initialize(this);
     // The default is not a test, but remember it for later.
     if (clause->is_default()) {
       default_clause = clause;
@@ -2283,7 +2287,7 @@
           if (clause->is_default()) {
             clause->body_target()->Bind();
           } else {
-            JumpTarget body(this);
+            JumpTarget body;
             body.Jump();
             clause->body_target()->Bind();
             frame_->Drop();
@@ -2321,7 +2325,7 @@
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ LoopStatement");
   CodeForStatementPosition(node);
-  node->break_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
   // known result for the test expression, with no side effects.
@@ -2342,21 +2346,21 @@
 
   switch (node->type()) {
     case LoopStatement::DO_LOOP: {
-      JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+      JumpTarget body(JumpTarget::BIDIRECTIONAL);
       IncrementLoopNesting();
 
       // Label the top of the loop for the backward jump if necessary.
       if (info == ALWAYS_TRUE) {
         // Use the continue target.
-        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
         node->continue_target()->Bind();
       } else if (info == ALWAYS_FALSE) {
         // No need to label it.
-        node->continue_target()->Initialize(this);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
       } else {
         // Continue is the test, so use the backward body target.
         ASSERT(info == DONT_KNOW);
-        node->continue_target()->Initialize(this);
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
         body.Bind();
       }
 
@@ -2415,27 +2419,25 @@
 
       JumpTarget body;
       if (test_at_bottom) {
-        body.Initialize(this, JumpTarget::BIDIRECTIONAL);
-      } else {
-        body.Initialize(this);
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
       }
 
       // Based on the condition analysis, compile the test as necessary.
       if (info == ALWAYS_TRUE) {
         // We will not compile the test expression.  Label the top of
         // the loop with the continue target.
-        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
         node->continue_target()->Bind();
       } else {
         ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
         if (test_at_bottom) {
           // Continue is the test at the bottom, no need to label the
           // test at the top.  The body is a backward target.
-          node->continue_target()->Initialize(this);
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
         } else {
           // Label the test at the top as the continue target.  The
           // body is a forward-only target.
-          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
           node->continue_target()->Bind();
         }
         // Compile the test with the body as the true target and
@@ -2518,15 +2520,13 @@
 
       // Target for backward edge if no test at the bottom, otherwise
       // unused.
-      JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
 
       // Target for backward edge if there is a test at the bottom,
       // otherwise used as target for test at the top.
       JumpTarget body;
       if (test_at_bottom) {
-        body.Initialize(this, JumpTarget::BIDIRECTIONAL);
-      } else {
-        body.Initialize(this);
+        body.set_direction(JumpTarget::BIDIRECTIONAL);
       }
 
       // Based on the condition analysis, compile the test as necessary.
@@ -2535,11 +2535,11 @@
         // the loop.
         if (node->next() == NULL) {
           // Use the continue target if there is no update expression.
-          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
           node->continue_target()->Bind();
         } else {
           // Otherwise use the backward loop target.
-          node->continue_target()->Initialize(this);
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
           loop.Bind();
         }
       } else {
@@ -2547,16 +2547,16 @@
         if (test_at_bottom) {
           // Continue is either the update expression or the test at
           // the bottom, no need to label the test at the top.
-          node->continue_target()->Initialize(this);
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
         } else if (node->next() == NULL) {
           // We are not recompiling the test at the bottom and there
           // is no update expression.
-          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
           node->continue_target()->Bind();
         } else {
           // We are not recompiling the test at the bottom and there
           // is an update expression.
-          node->continue_target()->Initialize(this);
+          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
           loop.Bind();
         }
 
@@ -2656,16 +2656,16 @@
 
 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ForInStatement");
   CodeForStatementPosition(node);
 
-  JumpTarget primitive(this);
-  JumpTarget jsobject(this);
-  JumpTarget fixed_array(this);
-  JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check(this);
-  JumpTarget exit(this);
+  JumpTarget primitive;
+  JumpTarget jsobject;
+  JumpTarget fixed_array;
+  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check;
+  JumpTarget exit;
 
   // Get the object to enumerate over (converted to JSObject).
   LoadAndSpill(node->enumerable());
@@ -2750,8 +2750,8 @@
   entry.Bind();
   // Grab the current frame's height for the break and continue
   // targets only after all the state is pushed on the frame.
-  node->break_target()->Initialize(this);
-  node->continue_target()->Initialize(this);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   __ mov(eax, frame_->ElementAt(0));  // load the current count
   __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
@@ -2846,12 +2846,12 @@
 
 void CodeGenerator::VisitTryCatch(TryCatch* node) {
   ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ TryCatch");
   CodeForStatementPosition(node);
 
-  JumpTarget try_block(this);
-  JumpTarget exit(this);
+  JumpTarget try_block;
+  JumpTarget exit;
 
   try_block.Call();
   // --- Catch block ---
@@ -2985,7 +2985,7 @@
 
 void CodeGenerator::VisitTryFinally(TryFinally* node) {
   ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope(this);
+  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ TryFinally");
   CodeForStatementPosition(node);
 
@@ -2994,8 +2994,8 @@
   // break/continue from within the try block.
   enum { FALLING, THROWING, JUMPING };
 
-  JumpTarget try_block(this);
-  JumpTarget finally_block(this);
+  JumpTarget try_block;
+  JumpTarget finally_block;
 
   try_block.Call();
 
@@ -3147,7 +3147,7 @@
         } else {
           // Branch around the preparation for return which may emit
           // code.
-          JumpTarget skip(this);
+          JumpTarget skip;
           skip.Branch(not_equal);
           frame_->PrepareForReturn();
           original->Jump(&return_value);
@@ -3161,7 +3161,7 @@
 
   if (has_valid_frame()) {
     // Check if we need to rethrow the exception.
-    JumpTarget exit(this);
+    JumpTarget exit;
     __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
     exit.Branch(not_equal);
 
@@ -3221,9 +3221,9 @@
 
 void CodeGenerator::VisitConditional(Conditional* node) {
   Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then(this);
-  JumpTarget else_(this);
-  JumpTarget exit(this);
+  JumpTarget then;
+  JumpTarget else_;
+  JumpTarget exit;
   ControlDestination dest(&then, &else_, true);
   LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
 
@@ -3255,8 +3255,8 @@
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
 
-    JumpTarget slow(this);
-    JumpTarget done(this);
+    JumpTarget slow;
+    JumpTarget done;
     Result value;
 
     // Generate fast-case code for variables that might be shadowed by
@@ -3320,9 +3320,9 @@
     //
     // We currently spill the virtual frame because constants use the
     // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope(this);
+    VirtualFrame::SpilledScope spilled_scope;
     Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit(this);
+    JumpTarget exit;
     __ mov(ecx, SlotOperand(slot, ecx));
     __ cmp(ecx, Factory::the_hole_value());
     exit.Branch(not_equal);
@@ -3460,7 +3460,7 @@
   } else {
     ASSERT(!slot->var()->is_dynamic());
 
-    JumpTarget exit(this);
+    JumpTarget exit;
     if (init_state == CONST_INIT) {
       ASSERT(slot->var()->mode() == Variable::CONST);
       // Only the first const initialization must be executed (the slot
@@ -3470,7 +3470,7 @@
       // We spill the frame in the code below because the direct-frame
       // access of SlotOperand is potentially unsafe with an unspilled
       // frame.
-      VirtualFrame::SpilledScope spilled_scope(this);
+      VirtualFrame::SpilledScope spilled_scope;
       Comment cmnt(masm_, "[ Init const");
       __ mov(ecx, SlotOperand(slot, ecx));
       __ cmp(ecx, Factory::the_hole_value());
@@ -4221,7 +4221,8 @@
 
   // Call the function.
   CodeForSourcePosition(node->position());
-  CallFunctionStub call_function(arg_count);
+  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub call_function(arg_count, in_loop);
   result = frame_->CallStub(&call_function, arg_count + 1);
 
   // Restore the context and overwrite the function on the stack with
@@ -4285,13 +4286,13 @@
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  JumpTarget slow_case(this);
-  JumpTarget end(this);
-  JumpTarget not_a_flat_string(this);
-  JumpTarget a_cons_string(this);
-  JumpTarget try_again_with_new_string(this, JumpTarget::BIDIRECTIONAL);
-  JumpTarget ascii_string(this);
-  JumpTarget got_char_code(this);
+  JumpTarget slow_case;
+  JumpTarget end;
+  JumpTarget not_a_flat_string;
+  JumpTarget a_cons_string;
+  JumpTarget try_again_with_new_string(JumpTarget::BIDIRECTIONAL);
+  JumpTarget ascii_string;
+  JumpTarget got_char_code;
 
   Load(args->at(0));
   Load(args->at(1));
@@ -4449,7 +4450,7 @@
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
-  JumpTarget leave(this);
+  JumpTarget leave;
   Load(args->at(0));  // Load the object.
   frame_->Dup();
   Result object = frame_->Pop();
@@ -4473,7 +4474,7 @@
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
-  JumpTarget leave(this);
+  JumpTarget leave;
   Load(args->at(0));  // Load the object.
   Load(args->at(1));  // Load the value.
   Result value = frame_->Pop();
@@ -4585,9 +4586,10 @@
   }
 
   if (function == NULL) {
-    // Call the JS runtime function.  Pass 0 as the loop nesting depth
-    // because we do not handle runtime calls specially in loops.
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, 0);
+    // Call the JS runtime function.
+    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+                                       arg_count,
+                                       loop_nesting_);
     frame_->RestoreContextRegister();
     frame_->SetElementAt(0, &answer);
   } else {
@@ -4702,8 +4704,8 @@
 
       case Token::BIT_NOT: {
         // Smi check.
-        JumpTarget smi_label(this);
-        JumpTarget continue_label(this);
+        JumpTarget smi_label;
+        JumpTarget continue_label;
         Result operand = frame_->Pop();
         operand.ToRegister();
         __ test(operand.reg(), Immediate(kSmiTagMask));
@@ -4726,7 +4728,7 @@
 
       case Token::ADD: {
         // Smi check.
-        JumpTarget continue_label(this);
+        JumpTarget continue_label;
         Result operand = frame_->Pop();
         operand.ToRegister();
         __ test(operand.reg(), Immediate(kSmiTagMask));
@@ -4884,6 +4886,7 @@
       tmp.Unuse();
       deferred->enter()->Branch(not_zero, &value, not_taken);
     } else {  // Otherwise we test separately for overflow and smi check.
+      deferred->SetEntryFrame(&value);
       deferred->enter()->Branch(overflow, &value, not_taken);
       __ test(value.reg(), Immediate(kSmiTagMask));
       deferred->enter()->Branch(not_zero, &value, not_taken);
@@ -4921,7 +4924,7 @@
   // is necessary because we assume that if we get control flow on the
   // last path out of an expression we got it on all paths.
   if (op == Token::AND) {
-    JumpTarget is_true(this);
+    JumpTarget is_true;
     ControlDestination dest(&is_true, destination()->false_target(), true);
     LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
 
@@ -4959,8 +4962,8 @@
       // We have a materialized value on the frame, so we exit with
       // one on all paths.  There are possibly also jumps to is_true
       // from nested subexpressions.
-      JumpTarget pop_and_continue(this);
-      JumpTarget exit(this);
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
 
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
@@ -4984,7 +4987,7 @@
     }
 
   } else if (op == Token::OR) {
-    JumpTarget is_false(this);
+    JumpTarget is_false;
     ControlDestination dest(destination()->true_target(), &is_false, false);
     LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
 
@@ -5021,8 +5024,8 @@
       // We have a materialized value on the frame, so we exit with
       // one on all paths.  There are possibly also jumps to is_false
       // from nested subexpressions.
-      JumpTarget pop_and_continue(this);
-      JumpTarget exit(this);
+      JumpTarget pop_and_continue;
+      JumpTarget exit;
 
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
@@ -5398,9 +5401,13 @@
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
 
-      if (is_global || cgen_->scope()->is_global_scope()) {
-        // Do not inline the inobject property case for loads from the
-        // global object or loads in toplevel code.
+      // Do not inline the inobject property case for loads from the global
+      // object.  Also do not inline for unoptimized code.  This saves time
+      // in the code generator.  Unoptimized code is toplevel code or code
+      // that is not in a loop.
+      if (is_global ||
+          cgen_->scope()->is_global_scope() ||
+          cgen_->loop_nesting() == 0) {
         Comment cmnt(masm, "[ Load from named Property");
         cgen_->frame()->Push(GetName());
 
@@ -5740,22 +5747,27 @@
     ASSERT(kSmiTag == 0);  // adjust zero check if not the case
     __ test(answer.reg(), Immediate(kSmiTagMask));
   }
-  enter()->Branch(not_zero, left, right, not_taken);
-
-  // All operations start by copying the left argument into answer.
-  __ mov(answer.reg(), left->reg());
   switch (op_) {
     case Token::ADD:
+      SetEntryFrame(left, right);
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       __ add(answer.reg(), Operand(right->reg()));  // add optimistically
       enter()->Branch(overflow, left, right, not_taken);
       break;
 
     case Token::SUB:
+      SetEntryFrame(left, right);
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       __ sub(answer.reg(), Operand(right->reg()));  // subtract optimistically
       enter()->Branch(overflow, left, right, not_taken);
       break;
 
     case Token::MUL: {
+      SetEntryFrame(left, right);
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       // If the smi tag is 0 we can just leave the tag on one operand.
       ASSERT(kSmiTag == 0);  // adjust code below if not the case
       // Remove tag from the left operand (but keep sign).
@@ -5782,6 +5794,8 @@
 
     case Token::DIV:  // Fall through.
     case Token::MOD: {
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       // Div and mod use the registers eax and edx.  Left and right must
       // be preserved, because the original operands are needed if we switch
       // to the slow case.  Move them if either is in eax or edx.
@@ -5919,20 +5933,28 @@
       break;
     }
     case Token::BIT_OR:
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       __ or_(answer.reg(), Operand(right->reg()));
       break;
 
     case Token::BIT_AND:
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       __ and_(answer.reg(), Operand(right->reg()));
       break;
 
     case Token::BIT_XOR:
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       __ xor_(answer.reg(), Operand(right->reg()));
       break;
 
     case Token::SHL:
     case Token::SHR:
     case Token::SAR:
+      enter()->Branch(not_zero, left, right, not_taken);
+      __ mov(answer.reg(), left->reg());
       // Move right into ecx.
       // Left is in two registers already, so even if left or answer is ecx,
       // we can move right to it, and use the other one.
@@ -5994,7 +6016,7 @@
           // the left and right arguments, and jump to slow case.
           // The low bit of the left argument may be lost, but only
           // in a case where it is dropped anyway.
-          JumpTarget result_ok(generator());
+          JumpTarget result_ok;
           __ test(left->reg(), Immediate(0xc0000000));
           result_ok.Branch(zero, left, taken);
           __ shl(left->reg());
@@ -6008,7 +6030,7 @@
         case Token::SHL: {
           __ shl(left->reg());
           // Check that the *signed* result fits in a smi.
-          JumpTarget result_ok(generator());
+          JumpTarget result_ok;
           __ cmp(left->reg(), 0xc0000000);
           result_ok.Branch(positive, left, taken);
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 0e01957..1307727 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -28,7 +28,8 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declarations
 class DeferredCode;
@@ -347,7 +348,6 @@
   void IncrementLoopNesting() { loop_nesting_++; }
   void DecrementLoopNesting() { loop_nesting_--; }
 
-
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
@@ -487,8 +487,7 @@
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc);
-  Handle<Code> ComputeCallInitializeInLoop(int argc);
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -588,7 +587,7 @@
 
   bool is_eval_;  // Tells whether code is generated for eval.
   Handle<Script> script_;
-  List<DeferredCode*> deferred_;
+  ZoneList<DeferredCode*> deferred_;
 
   // Assembler
   MacroAssembler* masm_;  // to generate code
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index ff70055..82a5565 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -32,7 +32,8 @@
 #include "cpu.h"
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 void CPU::Setup() {
   CpuFeatures::Probe();
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 9503cfc..9913a39 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -31,7 +31,8 @@
 #include "debug.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 1bc62ec..dea439f 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -29,7 +29,8 @@
 
 #include "frames-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 StackFrame::Type StackFrame::ComputeType(State* state) {
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index f86dbe4..aec1f48 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -28,7 +28,8 @@
 #ifndef V8_IA32_FRAMES_IA32_H_
 #define V8_IA32_FRAMES_IA32_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Register lists
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 4231bfa..d7f264d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -32,7 +32,8 @@
 #include "runtime.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // Static IC stub generators.
@@ -426,7 +427,7 @@
 
   // Probe the stub cache.
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
   StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
 
   // If the stub cache probing failed, the receiver might be a value.
@@ -635,7 +636,9 @@
   __ mov(eax, Operand(esp, kPointerSize));
 
   // Probe the stub cache.
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
   StubCache::GenerateProbe(masm, flags, eax, ecx, ebx);
 
   // Cache miss: Jump to runtime.
@@ -838,7 +841,9 @@
 
   // Get the receiver from the stack and probe the stub cache.
   __ mov(edx, Operand(esp, 4));
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
   StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
 
   // Cache miss: Jump to runtime.
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
index df2eb59..55421a2 100644
--- a/src/ia32/jump-target-ia32.cc
+++ b/src/ia32/jump-target-ia32.cc
@@ -28,49 +28,51 @@
 #include "v8.h"
 
 #include "codegen-inl.h"
+#include "jump-target-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(cgen()->masm())
 
 void JumpTarget::DoJump() {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
   // Live non-frame registers are not allowed at unconditional jumps
   // because we have no way of invalidating the corresponding results
   // which are still live in the C++ code.
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
 
   if (is_bound()) {
     // Backward jump.  There is an expected frame to merge to.
     ASSERT(direction_ == BIDIRECTIONAL);
-    cgen_->frame()->MergeTo(entry_frame_);
-    cgen_->DeleteFrame();
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
     __ jmp(&entry_label_);
   } else if (entry_frame_ != NULL) {
     // Forward jump with a preconfigured entry frame.  Assert the
     // current frame matches the expected one and jump to the block.
-    ASSERT(cgen_->frame()->Equals(entry_frame_));
-    cgen_->DeleteFrame();
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
+    cgen()->DeleteFrame();
     __ jmp(&entry_label_);
   } else {
     // Forward jump.  Remember the current frame and emit a jump to
     // its merge code.
-    AddReachingFrame(cgen_->frame());
+    AddReachingFrame(cgen()->frame());
     RegisterFile empty;
-    cgen_->SetFrame(NULL, &empty);
+    cgen()->SetFrame(NULL, &empty);
     __ jmp(&merge_labels_.last());
   }
 }
 
 
 void JumpTarget::DoBranch(Condition cc, Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
 
   if (is_bound()) {
     ASSERT(direction_ == BIDIRECTIONAL);
@@ -80,29 +82,29 @@
     // Swap the current frame for a copy (we do the swapping to get
     // the off-frame registers off the fall through) to use for the
     // branch.
-    VirtualFrame* fall_through_frame = cgen_->frame();
+    VirtualFrame* fall_through_frame = cgen()->frame();
     VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
     RegisterFile non_frame_registers = RegisterAllocator::Reserved();
-    cgen_->SetFrame(branch_frame, &non_frame_registers);
+    cgen()->SetFrame(branch_frame, &non_frame_registers);
 
     // Check if we can avoid merge code.
-    cgen_->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen_->frame()->Equals(entry_frame_)) {
+    cgen()->frame()->PrepareMergeTo(entry_frame_);
+    if (cgen()->frame()->Equals(entry_frame_)) {
       // Branch right in to the block.
-      cgen_->DeleteFrame();
+      cgen()->DeleteFrame();
       __ j(cc, &entry_label_, hint);
-      cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
       return;
     }
 
     // Check if we can reuse existing merge code.
     for (int i = 0; i < reaching_frames_.length(); i++) {
       if (reaching_frames_[i] != NULL &&
-          cgen_->frame()->Equals(reaching_frames_[i])) {
+          cgen()->frame()->Equals(reaching_frames_[i])) {
         // Branch to the merge code.
-        cgen_->DeleteFrame();
+        cgen()->DeleteFrame();
         __ j(cc, &merge_labels_[i], hint);
-        cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
         return;
       }
     }
@@ -111,21 +113,21 @@
     // around the merge code on the fall through path.
     Label original_fall_through;
     __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
-    cgen_->frame()->MergeTo(entry_frame_);
-    cgen_->DeleteFrame();
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
     __ jmp(&entry_label_);
-    cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
     __ bind(&original_fall_through);
 
   } else if (entry_frame_ != NULL) {
     // Forward branch with a preconfigured entry frame.  Assert the
     // current frame matches the expected one and branch to the block.
-    ASSERT(cgen_->frame()->Equals(entry_frame_));
+    ASSERT(cgen()->frame()->Equals(entry_frame_));
     // Explicitly use the macro assembler instead of __ as forward
     // branches are expected to be a fixed size (no inserted
     // coverage-checking instructions please).  This is used in
     // Reference::GetValue.
-    masm_->j(cc, &entry_label_, hint);
+    cgen()->masm()->j(cc, &entry_label_, hint);
 
   } else {
     // Forward branch.  A copy of the current frame is remembered and
@@ -133,8 +135,8 @@
     // macro assembler instead of __ as forward branches are expected
     // to be a fixed size (no inserted coverage-checking instructions
     // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen_->frame()));
-    masm_->j(cc, &merge_labels_.last(), hint);
+    AddReachingFrame(new VirtualFrame(cgen()->frame()));
+    cgen()->masm()->j(cc, &merge_labels_.last(), hint);
   }
 }
 
@@ -146,14 +148,14 @@
   // at the label (which should be the only one) is the spilled current
   // frame plus an in-memory return address.  The "fall-through" frame
   // at the return site is the spilled current frame.
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen() != NULL);
+  ASSERT(cgen()->has_valid_frame());
   // There are no non-frame references across the call.
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   ASSERT(!is_linked());
 
-  cgen_->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+  cgen()->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
   target_frame->Adjust(1);
   // We do not expect a call with a preconfigured entry frame.
   ASSERT(entry_frame_ == NULL);
@@ -163,28 +165,28 @@
 
 
 void JumpTarget::DoBind(int mergable_elements) {
-  ASSERT(cgen_ != NULL);
+  ASSERT(cgen() != NULL);
   ASSERT(!is_bound());
 
   // Live non-frame registers are not allowed at the start of a basic
   // block.
-  ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
 
   // Fast case: the jump target was manually configured with an entry
   // frame to use.
   if (entry_frame_ != NULL) {
     // Assert no reaching frames to deal with.
     ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen_->has_valid_frame());
+    ASSERT(!cgen()->has_valid_frame());
 
     RegisterFile reserved = RegisterAllocator::Reserved();
     if (direction_ == BIDIRECTIONAL) {
       // Copy the entry frame so the original can be used for a
       // possible backward jump.
-      cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved);
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved);
     } else {
       // Take ownership of the entry frame.
-      cgen_->SetFrame(entry_frame_, &reserved);
+      cgen()->SetFrame(entry_frame_, &reserved);
       entry_frame_ = NULL;
     }
     __ bind(&entry_label_);
@@ -192,12 +194,12 @@
   }
 
   if (!is_linked()) {
-    ASSERT(cgen_->has_valid_frame());
+    ASSERT(cgen()->has_valid_frame());
     if (direction_ == FORWARD_ONLY) {
       // Fast case: no forward jumps and no possible backward jumps.
       // The stack pointer can be floating above the top of the
       // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen_->frame();
+      VirtualFrame* frame = cgen()->frame();
       int difference =
           frame->stack_pointer_ - (frame->elements_.length() - 1);
       if (difference > 0) {
@@ -209,22 +211,22 @@
       // Fast case: no forward jumps, possible backward ones.  Remove
       // constants and copies above the watermark on the fall-through
       // frame and use it as the entry frame.
-      cgen_->frame()->MakeMergable(mergable_elements);
-      entry_frame_ = new VirtualFrame(cgen_->frame());
+      cgen()->frame()->MakeMergable(mergable_elements);
+      entry_frame_ = new VirtualFrame(cgen()->frame());
     }
     __ bind(&entry_label_);
     return;
   }
 
   if (direction_ == FORWARD_ONLY &&
-      !cgen_->has_valid_frame() &&
+      !cgen()->has_valid_frame() &&
       reaching_frames_.length() == 1) {
     // Fast case: no fall-through, a single forward jump, and no
     // possible backward jumps.  Pick up the only reaching frame, take
     // ownership of it, and use it for the block about to be emitted.
     VirtualFrame* frame = reaching_frames_[0];
     RegisterFile reserved = RegisterAllocator::Reserved();
-    cgen_->SetFrame(frame, &reserved);
+    cgen()->SetFrame(frame, &reserved);
     reaching_frames_[0] = NULL;
     __ bind(&merge_labels_[0]);
 
@@ -244,11 +246,11 @@
   // If there is a current frame, record it as the fall-through.  It
   // is owned by the reaching frames for now.
   bool had_fall_through = false;
-  if (cgen_->has_valid_frame()) {
+  if (cgen()->has_valid_frame()) {
     had_fall_through = true;
-    AddReachingFrame(cgen_->frame());  // Return value ignored.
+    AddReachingFrame(cgen()->frame());  // Return value ignored.
     RegisterFile empty;
-    cgen_->SetFrame(NULL, &empty);
+    cgen()->SetFrame(NULL, &empty);
   }
 
   // Compute the frame to use for entry to the block.
@@ -283,17 +285,17 @@
           // binding site or as the fall through from a previous merge
           // code block.  Jump around the code we are about to
           // generate.
-          if (cgen_->has_valid_frame()) {
-            cgen_->DeleteFrame();
+          if (cgen()->has_valid_frame()) {
+            cgen()->DeleteFrame();
             __ jmp(&entry_label_);
           }
           // Pick up the frame for this block.  Assume ownership if
           // there cannot be backward jumps.
           RegisterFile reserved = RegisterAllocator::Reserved();
           if (direction_ == BIDIRECTIONAL) {
-            cgen_->SetFrame(new VirtualFrame(frame), &reserved);
+            cgen()->SetFrame(new VirtualFrame(frame), &reserved);
           } else {
-            cgen_->SetFrame(frame, &reserved);
+            cgen()->SetFrame(frame, &reserved);
             reaching_frames_[i] = NULL;
           }
           __ bind(&merge_labels_[i]);
@@ -302,7 +304,7 @@
           // looking for any that can share merge code with this one.
           for (int j = 0; j < i; j++) {
             VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen_->frame())) {
+            if (other != NULL && other->Equals(cgen()->frame())) {
               // Set the reaching frame element to null to avoid
               // processing it later, and then bind its entry label.
               reaching_frames_[j] = NULL;
@@ -311,13 +313,13 @@
           }
 
           // Emit the merge code.
-          cgen_->frame()->MergeTo(entry_frame_);
+          cgen()->frame()->MergeTo(entry_frame_);
         } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
           // If this is the fall through frame, and it didn't need
           // merge code, we need to pick up the frame so we can jump
           // around subsequent merge blocks if necessary.
           RegisterFile reserved = RegisterAllocator::Reserved();
-          cgen_->SetFrame(frame, &reserved);
+          cgen()->SetFrame(frame, &reserved);
           reaching_frames_[i] = NULL;
         }
       }
@@ -326,9 +328,9 @@
     // The code generator may not have a current frame if there was no
     // fall through and none of the reaching frames needed merging.
     // In that case, clone the entry frame as the current frame.
-    if (!cgen_->has_valid_frame()) {
+    if (!cgen()->has_valid_frame()) {
       RegisterFile reserved_registers = RegisterAllocator::Reserved();
-      cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+      cgen()->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
     }
 
     // There may be unprocessed reaching frames that did not need
@@ -354,9 +356,9 @@
     // Use a copy of the reaching frame so the original can be saved
     // for possible reuse as a backward merge block.
     RegisterFile reserved = RegisterAllocator::Reserved();
-    cgen_->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
     __ bind(&merge_labels_[0]);
-    cgen_->frame()->MergeTo(entry_frame_);
+    cgen()->frame()->MergeTo(entry_frame_);
   }
 
   __ bind(&entry_label_);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d6d5800..4161370 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -33,7 +33,8 @@
 #include "runtime.h"
 #include "serialize.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
@@ -631,7 +632,7 @@
                                       Register result,
                                       Register op,
                                       JumpTarget* then_target) {
-  JumpTarget ok(cgen);
+  JumpTarget ok;
   test(result, Operand(result));
   ok.Branch(not_zero, taken);
   test(op, Operand(op));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index cd7a233..940a8b4 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -30,7 +30,8 @@
 
 #include "assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declaration.
 class JumpTarget;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 2a0cefd..92cd019 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -35,7 +35,8 @@
 #include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 /*
  * This assembler uses the following register assignment convention
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8c5dd24..a06700a 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -28,7 +28,8 @@
 #ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
 #define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
  public:
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index b493b17..2a51e87 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -30,7 +30,8 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Result implementation.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bdfc3d6..6c675f9 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,8 @@
 #include "codegen-inl.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define __ ACCESS_MASM(masm)
 
@@ -58,7 +59,7 @@
 
   // Check that the flags match what we're looking for.
   __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
-  __ and_(offset, ~Code::kFlagsTypeMask);
+  __ and_(offset, ~Code::kFlagsNotUsedInLookup);
   __ cmp(offset, flags);
   __ j(not_equal, &miss);
 
@@ -470,7 +471,9 @@
 Object* CallStubCompiler::CompileCallField(Object* object,
                                            JSObject* holder,
                                            int index,
-                                           String* name) {
+                                           String* name,
+                                           Code::Flags flags) {
+  ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
   // ----------- S t a t e -------------
   // -----------------------------------
   Label miss;
@@ -511,14 +514,16 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(FIELD, name);
+  return GetCodeWithFlags(flags, name);
 }
 
 
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
-                                              CheckType check) {
+                                              CheckType check,
+                                              Code::Flags flags) {
+  ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
   // ----------- S t a t e -------------
   // -----------------------------------
   Label miss;
@@ -633,7 +638,7 @@
   if (function->shared()->name()->IsString()) {
     function_name = String::cast(function->shared()->name());
   }
-  return GetCode(CONSTANT_FUNCTION, function_name);
+  return GetCodeWithFlags(flags, function_name);
 }
 
 
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index daa4558..f86613d 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -31,26 +31,20 @@
 #include "register-allocator-inl.h"
 #include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
 
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.
 
 // On entry to a function, the virtual frame already contains the receiver,
 // the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
-    : cgen_(cgen),
-      masm_(cgen->masm()),
-      elements_(cgen->scope()->num_parameters()
-                + cgen->scope()->num_stack_slots()
-                + kPreallocatedElements),
-      parameter_count_(cgen->scope()->num_parameters()),
-      local_count_(0),
-      stack_pointer_(parameter_count_ + 1),  // 0-based index of TOS.
-      frame_pointer_(kIllegalIndex) {
-  for (int i = 0; i < parameter_count_ + 2; i++) {
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
     elements_.Add(FrameElement::MemoryElement());
   }
   for (int i = 0; i < kNumRegisters; i++) {
@@ -80,10 +74,10 @@
       break;
 
     case FrameElement::CONSTANT:
-      if (cgen_->IsUnsafeSmi(element.handle())) {
-        Result temp = cgen_->allocator()->Allocate();
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
         ASSERT(temp.is_valid());
-        cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
         __ mov(Operand(ebp, fp_relative(index)), temp.reg());
       } else {
         __ Set(Operand(ebp, fp_relative(index)),
@@ -95,7 +89,7 @@
       int backing_index = element.index();
       FrameElement backing_element = elements_[backing_index];
       if (backing_element.is_memory()) {
-        Result temp = cgen_->allocator()->Allocate();
+        Result temp = cgen()->allocator()->Allocate();
         ASSERT(temp.is_valid());
         __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
         __ mov(Operand(ebp, fp_relative(index)), temp.reg());
@@ -132,10 +126,10 @@
       break;
 
     case FrameElement::CONSTANT:
-      if (cgen_->IsUnsafeSmi(element.handle())) {
-        Result temp = cgen_->allocator()->Allocate();
+      if (cgen()->IsUnsafeSmi(element.handle())) {
+        Result temp = cgen()->allocator()->Allocate();
         ASSERT(temp.is_valid());
-        cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+        cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
         __ push(temp.reg());
       } else {
         __ push(Immediate(element.handle()));
@@ -187,16 +181,6 @@
   ASSERT(mergable_elements <= elements_.length());
 
   int start_index = elements_.length() - mergable_elements;
-
-  // The is_copied flags on entry frame elements are expected to be
-  // exact.  Set them for the elements below the water mark.
-  for (int i = 0; i < start_index; i++) {
-    elements_[i].clear_copied();
-    if (elements_[i].is_copy()) {
-      elements_[elements_[i].index()].set_copied();
-    }
-  }
-
   for (int i = start_index; i < elements_.length(); i++) {
     FrameElement element = elements_[i];
 
@@ -210,7 +194,7 @@
         if (element.is_copy()) {
           backing_element = elements_[element.index()];
         }
-        Result fresh = cgen_->allocator()->Allocate();
+        Result fresh = cgen()->allocator()->Allocate();
         ASSERT(fresh.is_valid());
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
@@ -219,8 +203,8 @@
 
         // Emit a move.
         if (element.is_constant()) {
-          if (cgen_->IsUnsafeSmi(element.handle())) {
-            cgen_->LoadUnsafeSmi(fresh.reg(), element.handle());
+          if (cgen()->IsUnsafeSmi(element.handle())) {
+            cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
           } else {
             __ Set(fresh.reg(), Immediate(element.handle()));
           }
@@ -255,10 +239,10 @@
 
 
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm_, "[ Merge frame");
+  Comment cmnt(masm(), "[ Merge frame");
   // We should always be merging the code generator's current frame to an
   // expected frame.
-  ASSERT(cgen_->frame() == this);
+  ASSERT(cgen()->frame() == this);
 
   // Adjust the stack pointer upward (toward the top of the virtual
   // frame) if necessary.
@@ -272,23 +256,6 @@
   MergeMoveRegistersToRegisters(expected);
   MergeMoveMemoryToRegisters(expected);
 
-  // Fix any sync flag problems from the bottom-up and make the copied
-  // flags exact.  This assumes that the backing store of copies is
-  // always lower in the frame.
-  for (int i = 0; i < elements_.length(); i++) {
-    FrameElement source = elements_[i];
-    FrameElement target = expected->elements_[i];
-    if (source.is_synced() && !target.is_synced()) {
-      elements_[i].clear_sync();
-    } else if (!source.is_synced() && target.is_synced()) {
-      SyncElementAt(i);
-    }
-    elements_[i].clear_copied();
-    if (elements_[i].is_copy()) {
-      elements_[elements_[i].index()].set_copied();
-    }
-  }
-
   // Adjust the stack pointer downward if necessary.
   if (stack_pointer_ > expected->stack_pointer_) {
     int difference = stack_pointer_ - expected->stack_pointer_;
@@ -314,13 +281,9 @@
   // of the index of the frame element esi is caching or kIllegalIndex
   // if esi has not been disturbed.
   int esi_caches = kIllegalIndex;
-  // A "singleton" memory element.
-  FrameElement memory_element = FrameElement::MemoryElement();
-  // Loop downward from the stack pointer or the top of the frame if
-  // the stack pointer is floating above the frame.
-  int start = Min(static_cast<int>(stack_pointer_), elements_.length() - 1);
-  for (int i = start; i >= 0; i--) {
+  for (int i = elements_.length() - 1; i >= 0; i--) {
     FrameElement target = expected->elements_[i];
+    if (target.is_register()) continue;  // Handle registers later.
     if (target.is_memory()) {
       FrameElement source = elements_[i];
       switch (source.type()) {
@@ -342,9 +305,9 @@
 
         case FrameElement::CONSTANT:
           if (!source.is_synced()) {
-            if (cgen_->IsUnsafeSmi(source.handle())) {
+            if (cgen()->IsUnsafeSmi(source.handle())) {
               esi_caches = i;
-              cgen_->LoadUnsafeSmi(esi, source.handle());
+              cgen()->LoadUnsafeSmi(esi, source.handle());
               __ mov(Operand(ebp, fp_relative(i)), esi);
             } else {
               __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
@@ -370,8 +333,8 @@
           }
           break;
       }
-      elements_[i] = memory_element;
     }
+    elements_[i] = target;
   }
 
   if (esi_caches != kIllegalIndex) {
@@ -388,35 +351,43 @@
     // Move the right value into register i if it is currently in a register.
     int index = expected->register_locations_[i];
     int use_index = register_locations_[i];
-    // Fast check if register is unused in target or already correct
-    if (index != kIllegalIndex
-        && index != use_index
-        && elements_[index].is_register()) {
-      Register source = elements_[index].reg();
-      Register target = { i };
+    // Skip if register i is unused in the target or else if source is
+    // not a register (this is not a register-to-register move).
+    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
+
+    Register target = { i };
+    Register source = elements_[index].reg();
+
+    if (index != use_index) {
       if (use_index == kIllegalIndex) {  // Target is currently unused.
         // Copy contents of source from source to target.
         // Set frame element register to target.
-        elements_[index].set_reg(target);
         Use(target, index);
         Unuse(source);
         __ mov(target, source);
       } else {
         // Exchange contents of registers source and target.
+        // Nothing except the register backing use_index has changed.
         elements_[use_index].set_reg(source);
-        elements_[index].set_reg(target);
         register_locations_[target.code()] = index;
         register_locations_[source.code()] = use_index;
         __ xchg(source, target);
       }
     }
+
+    if (!elements_[index].is_synced() &&
+        expected->elements_[index].is_synced()) {
+      __ mov(Operand(ebp, fp_relative(index)), target);
+    }
+    elements_[index] = expected->elements_[index];
   }
 }
 
 
 void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
   // Move memory, constants, and copies to registers.  This is the
-  // final step and is done from the bottom up so that the backing
+  // final step and since it is not done from the bottom up, but in
+  // register code order, we have special code to ensure that the backing
   // elements of copies are in their correct locations when we
   // encounter the copies.
   for (int i = 0; i < kNumRegisters; i++) {
@@ -425,7 +396,7 @@
       FrameElement source = elements_[index];
       FrameElement target = expected->elements_[index];
       Register target_reg = { i };
-      ASSERT(expected->elements_[index].reg().is(target_reg));
+      ASSERT(target.reg().is(target_reg));
       switch (source.type()) {
         case FrameElement::INVALID:  // Fall through.
           UNREACHABLE();
@@ -440,8 +411,8 @@
           break;
 
         case FrameElement::CONSTANT:
-          if (cgen_->IsUnsafeSmi(source.handle())) {
-            cgen_->LoadUnsafeSmi(target_reg, source.handle());
+          if (cgen()->IsUnsafeSmi(source.handle())) {
+            cgen()->LoadUnsafeSmi(target_reg, source.handle());
           } else {
            __ Set(target_reg, Immediate(source.handle()));
           }
@@ -486,7 +457,7 @@
 
 void VirtualFrame::Enter() {
   // Registers live on entry: esp, ebp, esi, edi.
-  Comment cmnt(masm_, "[ Enter JS frame");
+  Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
   // Verify that edi contains a JS function.  The following code
@@ -501,7 +472,6 @@
 
   EmitPush(ebp);
 
-  frame_pointer_ = stack_pointer_;
   __ mov(ebp, Operand(esp));
 
   // Store the context in the frame.  The context is kept in esi and a
@@ -513,12 +483,12 @@
   // reference now (ie, it can keep it in edi or spill it later).
   Push(edi);
   SyncElementAt(elements_.length() - 1);
-  cgen_->allocator()->Unuse(edi);
+  cgen()->allocator()->Unuse(edi);
 }
 
 
 void VirtualFrame::Exit() {
-  Comment cmnt(masm_, "[ Exit JS frame");
+  Comment cmnt(masm(), "[ Exit JS frame");
   // Record the location of the JS exit code for patching when setting
   // break point.
   __ RecordJSReturn();
@@ -528,7 +498,7 @@
   // call instruction to support patching the exit code in the
   // debugger. See VisitReturnStatement for the full return sequence.
   __ mov(esp, Operand(ebp));
-  stack_pointer_ = frame_pointer_;
+  stack_pointer_ = frame_pointer();
   for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
     FrameElement last = elements_.RemoveLast();
     if (last.is_register()) {
@@ -536,17 +506,14 @@
     }
   }
 
-  frame_pointer_ = kIllegalIndex;
   EmitPop(ebp);
 }
 
 
-void VirtualFrame::AllocateStackSlots(int count) {
-  ASSERT(height() == 0);
-  local_count_ = count;
-
+void VirtualFrame::AllocateStackSlots() {
+  int count = local_count();
   if (count > 0) {
-    Comment cmnt(masm_, "[ Allocate space for locals");
+    Comment cmnt(masm(), "[ Allocate space for locals");
     // The locals are initialized to a constant (the undefined value), but
     // we sync them with the actual frame to allocate space for spilling
     // them later.  First sync everything above the stack pointer so we can
@@ -555,7 +522,7 @@
     Handle<Object> undefined = Factory::undefined_value();
     FrameElement initial_value =
         FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    Result temp = cgen_->allocator()->Allocate();
+    Result temp = cgen()->allocator()->Allocate();
     ASSERT(temp.is_valid());
     __ Set(temp.reg(), Immediate(undefined));
     for (int i = 0; i < count; i++) {
@@ -580,7 +547,7 @@
 
 
 void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen_->allocator()->Allocate();
+  Result temp = cgen()->allocator()->Allocate();
   ASSERT(temp.is_valid());
   __ lea(temp.reg(), ParameterAt(-1));
   Push(&temp);
@@ -614,7 +581,7 @@
   // This is the backing store of copies.
   Register backing_reg;
   if (original.is_memory()) {
-    Result fresh = cgen_->allocator()->Allocate();
+    Result fresh = cgen()->allocator()->Allocate();
     ASSERT(fresh.is_valid());
     Use(fresh.reg(), new_backing_index);
     backing_reg = fresh.reg();
@@ -659,7 +626,7 @@
     case FrameElement::MEMORY: {
       // Emit code to load the original element's data into a register.
       // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen_->allocator()->Allocate();
+      Result fresh = cgen()->allocator()->Allocate();
       ASSERT(fresh.is_valid());
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
@@ -733,7 +700,7 @@
         // temp register.  Alternatively, allow copies to appear in
         // any order in the frame and lazily move the value down to
         // the slot.
-        Result temp = cgen_->allocator()->Allocate();
+        Result temp = cgen()->allocator()->Allocate();
         ASSERT(temp.is_valid());
         __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
         __ mov(Operand(ebp, fp_relative(index)), temp.reg());
@@ -780,7 +747,7 @@
 
     // The sync state of the former top element is correct (synced).
     // Emit code to move the value down in the frame.
-    Result temp = cgen_->allocator()->Allocate();
+    Result temp = cgen()->allocator()->Allocate();
     ASSERT(temp.is_valid());
     __ mov(temp.reg(), Operand(esp, 0));
     __ mov(Operand(ebp, fp_relative(index)), temp.reg());
@@ -805,7 +772,7 @@
 
 
 void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   // Grow the expression stack by handler size less two (the return address
   // is already pushed by a call instruction, and PushTryHandler from the
   // macro assembler will leave the top of stack in the eax register to be
@@ -818,9 +785,9 @@
 
 
 Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallStub(stub);
-  Result result = cgen_->allocator()->Allocate(eax);
+  Result result = cgen()->allocator()->Allocate(eax);
   ASSERT(result.is_valid());
   return result;
 }
@@ -861,9 +828,9 @@
 
 Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
   PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(f, arg_count);
-  Result result = cgen_->allocator()->Allocate(eax);
+  Result result = cgen()->allocator()->Allocate(eax);
   ASSERT(result.is_valid());
   return result;
 }
@@ -871,9 +838,9 @@
 
 Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
   PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(id, arg_count);
-  Result result = cgen_->allocator()->Allocate(eax);
+  Result result = cgen()->allocator()->Allocate(eax);
   ASSERT(result.is_valid());
   return result;
 }
@@ -883,9 +850,9 @@
                                    InvokeFlag flag,
                                    int arg_count) {
   PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ InvokeBuiltin(id, flag);
-  Result result = cgen_->allocator()->Allocate(eax);
+  Result result = cgen()->allocator()->Allocate(eax);
   ASSERT(result.is_valid());
   return result;
 }
@@ -893,9 +860,9 @@
 
 Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
                                        RelocInfo::Mode rmode) {
-  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(cgen()->HasValidEntryRegisters());
   __ call(code, rmode);
-  Result result = cgen_->allocator()->Allocate(eax);
+  Result result = cgen()->allocator()->Allocate(eax);
   ASSERT(result.is_valid());
   return result;
 }
@@ -974,9 +941,8 @@
   // Arguments, receiver, and function name are on top of the frame.
   // The IC expects them on the stack.  It does not drop the function
   // name slot (but it does drop the rest).
-  Handle<Code> ic = (loop_nesting > 0)
-                    ? cgen_->ComputeCallInitializeInLoop(arg_count)
-                    : cgen_->ComputeCallInitialize(arg_count);
+  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
   // Spill args, receiver, and function.  The call will drop args and
   // receiver.
   PrepareForCall(arg_count + 2, arg_count + 1);
@@ -998,7 +964,7 @@
   // Constructors are called with the number of arguments in register
   // eax for now. Another option would be to have separate construct
   // call trampolines per different arguments counts encountered.
-  Result num_args = cgen_->allocator()->Allocate(eax);
+  Result num_args = cgen()->allocator()->Allocate(eax);
   ASSERT(num_args.is_valid());
   __ Set(num_args.reg(), Immediate(arg_count));
 
@@ -1038,7 +1004,7 @@
   if (pop_needed) {
     stack_pointer_--;
     if (element.is_memory()) {
-      Result temp = cgen_->allocator()->Allocate();
+      Result temp = cgen()->allocator()->Allocate();
       ASSERT(temp.is_valid());
       temp.set_static_type(element.static_type());
       __ pop(temp.reg());
@@ -1065,7 +1031,7 @@
     // Memory elements could only be the backing store of a copy.
     // Allocate the original to a register.
     ASSERT(index <= stack_pointer_);
-    Result temp = cgen_->allocator()->Allocate();
+    Result temp = cgen()->allocator()->Allocate();
     ASSERT(temp.is_valid());
     Use(temp.reg(), index);
     FrameElement new_element =
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index c4ade02..9811d5a 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -29,8 +29,10 @@
 #define V8_IA32_VIRTUAL_FRAME_IA32_H_
 
 #include "register-allocator.h"
+#include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Virtual frames
@@ -50,29 +52,39 @@
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
-    explicit SpilledScope(CodeGenerator* cgen);
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
 
-    ~SpilledScope();
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
 
    private:
-    CodeGenerator* cgen_;
     bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   };
 
   // An illegal index into the virtual frame.
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  explicit VirtualFrame(CodeGenerator* cgen);
+  VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
   explicit VirtualFrame(VirtualFrame* original);
 
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index);
 
   // The height of the virtual expression stack.
-  int height() const {
+  int height() {
     return elements_.length() - expression_base_index();
   }
 
@@ -98,7 +110,12 @@
   // match an external frame effect (examples include a call removing
   // its arguments, and exiting a try/catch removing an exception
   // handler).  No code will be emitted.
-  void Forget(int count);
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == elements_.length() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
 
   // Forget count elements from the top of the frame without adjusting
   // the stack pointer downward.  This is used, for example, before
@@ -109,7 +126,9 @@
   void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg);
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_index(reg));
+  }
 
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
@@ -138,7 +157,7 @@
   // registers.  Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -152,7 +171,7 @@
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -174,7 +193,7 @@
   void PrepareForReturn();
 
   // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots(int count);
+  void AllocateStackSlots();
 
   // An element of the expression stack as an assembly operand.
   Operand ElementAt(int index) const {
@@ -200,9 +219,9 @@
   }
 
   // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) const {
+  Operand LocalAt(int index) {
     ASSERT(0 <= index);
-    ASSERT(index < local_count_);
+    ASSERT(index < local_count());
     return Operand(ebp, kLocal0Offset - index * kPointerSize);
   }
 
@@ -238,10 +257,10 @@
   void RestoreContextRegister();
 
   // A parameter as an assembly operand.
-  Operand ParameterAt(int index) const {
+  Operand ParameterAt(int index) {
     ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count_);
-    return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
+    ASSERT(index <= parameter_count());
+    return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
   }
 
   // Push a copy of the value of a parameter frame slot on top of the frame.
@@ -263,14 +282,17 @@
   }
 
   // The receiver frame slot.
-  Operand Receiver() const { return ParameterAt(-1); }
+  Operand Receiver() { return ParameterAt(-1); }
 
   // Push a try-catch or try-finally handler on top of the virtual frame.
   void PushTryHandler(HandlerType type);
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  Result CallStub(CodeStub* stub, int arg_count);
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
 
   // Call stub that takes a single argument passed in eax.  The
   // argument is given as a result which does not have to be eax or
@@ -354,7 +376,15 @@
 
   // Pushing a result invalidates it (its contents become owned by the
   // frame).
-  void Push(Result* result);
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg(), result->static_type());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
 
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
@@ -369,70 +399,69 @@
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  CodeGenerator* cgen_;
-  MacroAssembler* masm_;
-
   ZoneList<FrameElement> elements_;
 
-  // The number of frame-allocated locals and parameters respectively.
-  int parameter_count_;
-  int local_count_;
-
   // The index of the element that is at the processor's stack pointer
   // (the esp register).
   int stack_pointer_;
 
-  // The index of the element that is at the processor's frame pointer
-  // (the ebp register).
-  int frame_pointer_;
-
   // The index of the register frame element using each register, or
   // kIllegalIndex if a register is not on the frame.
   int register_locations_[kNumRegisters];
 
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).  The parameters, receiver, and return address
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 2; }
+
   // The index of the first parameter.  The receiver lies below the first
   // parameter.
-  int param0_index() const { return 1; }
+  int param0_index() { return 1; }
 
-  // The index of the context slot in the frame.
-  int context_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 1;
-  }
+  // The index of the context slot in the frame.  It is immediately
+  // above the frame pointer.
+  int context_index() { return frame_pointer() + 1; }
 
-  // The index of the function slot in the frame.  It lies above the context
-  // slot.
-  int function_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 2;
-  }
+  // The index of the function slot in the frame.  It is above the frame
+  // pointer and the context slot.
+  int function_index() { return frame_pointer() + 2; }
 
-  // The index of the first local.  Between the parameters and the locals
-  // lie the return address, the saved frame pointer, the context, and the
-  // function.
-  int local0_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 3;
-  }
+  // The index of the first local.  Between the frame pointer and the
+  // locals lie the context and the function.
+  int local0_index() { return frame_pointer() + 3; }
 
   // The index of the base of the expression stack.
-  int expression_base_index() const { return local0_index() + local_count_; }
+  int expression_base_index() { return local0_index() + local_count(); }
 
   // Convert a frame index into a frame pointer relative offset into the
   // actual stack.
-  int fp_relative(int index) const {
-    return (frame_pointer_ - index) * kPointerSize;
+  int fp_relative(int index) {
+    ASSERT(index < elements_.length());
+    ASSERT(frame_pointer() < elements_.length());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
   }
 
   // Record an occurrence of a register in the virtual frame.  This has the
   // effect of incrementing the register's external reference count and
   // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index);
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    register_locations_[reg.code()] = index;
+    cgen()->allocator()->Use(reg);
+  }
 
   // Record that a register reference has been dropped from the frame.  This
   // decrements the register's external reference count and invalidates the
   // index of the register's location in the frame.
-  void Unuse(Register reg);
+  void Unuse(Register reg) {
+    ASSERT(register_locations_[reg.code()] != kIllegalIndex);
+    register_locations_[reg.code()] = kIllegalIndex;
+    cgen()->allocator()->Unuse(reg);
+  }
 
   // Spill the element at a particular index---write it to memory if
   // necessary, free any associated register, and forget its value if
@@ -511,6 +540,7 @@
   friend class JumpTarget;
 };
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/src/ic-inl.h b/src/ic-inl.h
index bb56962..08304d8 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -32,7 +32,8 @@
 #include "debug.h"
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 Address IC::address() {
diff --git a/src/ic.cc b/src/ic.cc
index ccdf3ca..2d27661 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,13 +35,13 @@
 #include "runtime.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
+static char TransitionMarkFromState(IC::State state) {
   switch (state) {
     case UNINITIALIZED: return '0';
-    case UNINITIALIZED_IN_LOOP: return 'L';
     case PREMONOMORPHIC: return 'P';
     case MONOMORPHIC: return '1';
     case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
@@ -60,12 +60,14 @@
 void IC::TraceIC(const char* type,
                  Handle<String> name,
                  State old_state,
-                 Code* new_target) {
+                 Code* new_target,
+                 const char* extra_info) {
   if (FLAG_trace_ic) {
     State new_state = StateFrom(new_target, Heap::undefined_value());
-    PrintF("[%s (%c->%c) ", type,
+    PrintF("[%s (%c->%c)%s", type,
            TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state));
+           TransitionMarkFromState(new_state),
+           extra_info);
     name->Print();
     PrintF("]\n");
   }
@@ -226,8 +228,10 @@
 
 void CallIC::Clear(Address address, Code* target) {
   State state = target->ic_state();
-  if (state == UNINITIALIZED || state == UNINITIALIZED_IN_LOOP) return;
-  Code* code = StubCache::FindCallInitialize(target->arguments_count());
+  InLoopFlag in_loop = target->ic_in_loop();
+  if (state == UNINITIALIZED) return;
+  Code* code =
+      StubCache::FindCallInitialize(target->arguments_count(), in_loop);
   SetTargetAtAddress(address, code);
 }
 
@@ -390,21 +394,22 @@
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
+  InLoopFlag in_loop = target()->ic_in_loop();
   Object* code = NULL;
 
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    code = StubCache::ComputeCallPreMonomorphic(argc);
+    code = StubCache::ComputeCallPreMonomorphic(argc, in_loop);
   } else if (state == MONOMORPHIC) {
-    code = StubCache::ComputeCallMegamorphic(argc);
+    code = StubCache::ComputeCallMegamorphic(argc, in_loop);
   } else {
     // Compute monomorphic stub.
     switch (lookup->type()) {
       case FIELD: {
         int index = lookup->GetFieldIndex();
-        code = StubCache::ComputeCallField(argc, *name, *object,
+        code = StubCache::ComputeCallField(argc, in_loop, *name, *object,
                                            lookup->holder(), index);
         break;
       }
@@ -413,7 +418,7 @@
         // call; used for rewriting to monomorphic state and making sure
         // that the code stub is in the stub cache.
         JSFunction* function = lookup->GetConstantFunction();
-        code = StubCache::ComputeCallConstant(argc, *name, *object,
+        code = StubCache::ComputeCallConstant(argc, in_loop, *name, *object,
                                               lookup->holder(), function);
         break;
       }
@@ -425,7 +430,7 @@
         if (!object->IsJSObject()) return;
         Handle<JSObject> receiver = Handle<JSObject>::cast(object);
         if (lookup->holder() != *receiver) return;
-        code = StubCache::ComputeCallNormal(argc, *name, *receiver);
+        code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
         break;
       }
       case INTERCEPTOR: {
@@ -443,14 +448,15 @@
   if (code->IsFailure()) return;
 
   // Patch the call site depending on the state of the cache.
-  if (state == UNINITIALIZED || state == UNINITIALIZED_IN_LOOP ||
-      state == PREMONOMORPHIC || state == MONOMORPHIC ||
+  if (state == UNINITIALIZED ||
+      state == PREMONOMORPHIC ||
+      state == MONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
     set_target(Code::cast(code));
   }
 
 #ifdef DEBUG
-  TraceIC("CallIC", name, state, target());
+  TraceIC("CallIC", name, state, target(), in_loop ? " (in-loop)" : "");
 #endif
 }
 
@@ -1088,14 +1094,27 @@
   IC::State state = IC::StateFrom(ic.target(), args[0]);
   Object* result =
       ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
-  if (state != UNINITIALIZED_IN_LOOP || !result->IsJSFunction())
-    return result;
 
-  // Compile the function with the knowledge that it's called from
-  // within a loop. This enables further optimization of the function.
+  // The first time the inline cache is updated may be the first time the
+  // function it references gets called.  If the function was lazily compiled
+  // then the first call will trigger a compilation.  We check for this case
+  // and we do the compilation immediately, instead of waiting for the stub
+  // currently attached to the JSFunction object to trigger compilation.  We
+  // do this in the case where we know that the inline cache is inside a loop,
+  // because then we know that we want to optimize the function.
+  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
+    return result;
+  }
+
+  // Compile now with optimization.
   HandleScope scope;
   Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
-  if (!function->is_compiled()) CompileLazyInLoop(function, CLEAR_EXCEPTION);
+  InLoopFlag in_loop = ic.target()->ic_in_loop();
+  if (in_loop == IN_LOOP) {
+    CompileLazyInLoop(function, CLEAR_EXCEPTION);
+  } else {
+    CompileLazy(function, CLEAR_EXCEPTION);
+  }
   return *function;
 }
 
diff --git a/src/ic.h b/src/ic.h
index 11fd604..bd94fd8 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,8 @@
 
 #include "assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // IC_UTIL_LIST defines all utility functions called from generated
 // inline caching code. The argument for the macro, ICU, is the function name.
@@ -120,7 +121,8 @@
   static void TraceIC(const char* type,
                       Handle<String> name,
                       State old_state,
-                      Code* new_target);
+                      Code* new_target,
+                      const char* extra_info = "");
 #endif
 
   static Failure* TypeError(const char* type,
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 77bcc90..355fae4 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -36,7 +36,8 @@
 #include "interpreter-irregexp.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
diff --git a/src/interpreter-irregexp.h b/src/interpreter-irregexp.h
index c65cb9e..0ad8846 100644
--- a/src/interpreter-irregexp.h
+++ b/src/interpreter-irregexp.h
@@ -30,7 +30,8 @@
 #ifndef V8_INTERPRETER_IRREGEXP_H_
 #define V8_INTERPRETER_IRREGEXP_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class IrregexpInterpreter {
diff --git a/src/json-delay.js b/src/json-delay.js
index 90150c6..1a6f008 100644
--- a/src/json-delay.js
+++ b/src/json-delay.js
@@ -29,7 +29,7 @@
 
 function ParseJSONUnfiltered(text) {
   var s = $String(text);
-  var f = %CompileString("(" + text + ")", -1, true);
+  var f = %CompileString("(" + text + ")", true);
   return f();
 }
 
diff --git a/src/jsregexp-inl.h b/src/jsregexp-inl.h
index 09c4c8f..cc90bd1 100644
--- a/src/jsregexp-inl.h
+++ b/src/jsregexp-inl.h
@@ -33,7 +33,8 @@
 #include "regexp-macro-assembler.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 template <typename C>
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 5589b77..7500bf2 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -43,7 +43,7 @@
 #include "regexp-macro-assembler-irregexp.h"
 #include "regexp-stack.h"
 
-#ifdef V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
 #include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
@@ -56,7 +56,8 @@
 #include "interpreter-irregexp.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 9fa0ece..18bd19b 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -28,7 +28,8 @@
 #ifndef V8_JSREGEXP_H_
 #define V8_JSREGEXP_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class RegExpMacroAssembler;
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
index 186ace3..081821e 100644
--- a/src/jump-target-inl.h
+++ b/src/jump-target-inl.h
@@ -28,7 +28,12 @@
 #ifndef V8_JUMP_TARGET_INL_H_
 #define V8_JUMP_TARGET_INL_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+CodeGenerator* JumpTarget::cgen() {
+  return CodeGeneratorScope::Current();
+}
 
 void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
   entry_frame_->elements_[index].clear_copied();
diff --git a/src/jump-target.cc b/src/jump-target.cc
index 54ce57a..b89547b 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -31,7 +31,8 @@
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
@@ -39,36 +40,6 @@
 bool JumpTarget::compiling_deferred_code_ = false;
 
 
-JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
-    : cgen_(cgen),
-      direction_(direction),
-      reaching_frames_(0),
-      merge_labels_(0),
-      entry_frame_(NULL) {
-  ASSERT(cgen != NULL);
-  masm_ = cgen->masm();
-}
-
-
-JumpTarget::JumpTarget()
-    : cgen_(NULL),
-      masm_(NULL),
-      direction_(FORWARD_ONLY),
-      reaching_frames_(0),
-      merge_labels_(0),
-      entry_frame_(NULL) {
-}
-
-
-void JumpTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
-  ASSERT(cgen != NULL);
-  ASSERT(cgen_ == NULL);
-  cgen_ = cgen;
-  masm_ = cgen->masm();
-  direction_ = direction;
-}
-
-
 void JumpTarget::Unuse() {
   reaching_frames_.Clear();
   merge_labels_.Clear();
@@ -151,7 +122,7 @@
   // Build the new frame.  A freshly allocated frame has memory elements
   // for the parameters and some platform-dependent elements (e.g.,
   // return address).  Replace those first.
-  entry_frame_ = new VirtualFrame(cgen_);
+  entry_frame_ = new VirtualFrame();
   int index = 0;
   for (; index < entry_frame_->elements_.length(); index++) {
     FrameElement* target = elements[index];
@@ -245,10 +216,6 @@
     }
   }
 
-  // Fill in the other fields of the entry frame.
-  entry_frame_->local_count_ = initial_frame->local_count_;
-  entry_frame_->frame_pointer_ = initial_frame->frame_pointer_;
-
   // The stack pointer is at the highest synced element or the base of
   // the expression stack.
   int stack_pointer = length - 1;
@@ -266,31 +233,28 @@
 
 
 void JumpTarget::Jump(Result* arg) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
-  cgen_->frame()->Push(arg);
+  cgen()->frame()->Push(arg);
   DoJump();
 }
 
 
 void JumpTarget::Jump(Result* arg0, Result* arg1) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
-  cgen_->frame()->Push(arg0);
-  cgen_->frame()->Push(arg1);
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
   DoJump();
 }
 
 
 void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
-  cgen_->frame()->Push(arg0);
-  cgen_->frame()->Push(arg1);
-  cgen_->frame()->Push(arg2);
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
+  cgen()->frame()->Push(arg2);
   DoJump();
 }
 
@@ -316,35 +280,33 @@
 #endif
 
 void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
   // We want to check that non-frame registers at the call site stay in
   // the same registers on the fall-through branch.
   DECLARE_ARGCHECK_VARS(arg);
 
-  cgen_->frame()->Push(arg);
+  cgen()->frame()->Push(arg);
   DoBranch(cc, hint);
-  *arg = cgen_->frame()->Pop();
+  *arg = cgen()->frame()->Pop();
 
   ASSERT_ARGCHECK(arg);
 }
 
 
 void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->frame() != NULL);
+  ASSERT(cgen()->frame() != NULL);
 
   // We want to check that non-frame registers at the call site stay in
   // the same registers on the fall-through branch.
   DECLARE_ARGCHECK_VARS(arg0);
   DECLARE_ARGCHECK_VARS(arg1);
 
-  cgen_->frame()->Push(arg0);
-  cgen_->frame()->Push(arg1);
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
   DoBranch(cc, hint);
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 
   ASSERT_ARGCHECK(arg0);
   ASSERT_ARGCHECK(arg1);
@@ -356,8 +318,7 @@
                         Result* arg1,
                         Result* arg2,
                         Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->frame() != NULL);
+  ASSERT(cgen()->frame() != NULL);
 
   // We want to check that non-frame registers at the call site stay in
   // the same registers on the fall-through branch.
@@ -365,13 +326,13 @@
   DECLARE_ARGCHECK_VARS(arg1);
   DECLARE_ARGCHECK_VARS(arg2);
 
-  cgen_->frame()->Push(arg0);
-  cgen_->frame()->Push(arg1);
-  cgen_->frame()->Push(arg2);
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
+  cgen()->frame()->Push(arg2);
   DoBranch(cc, hint);
-  *arg2 = cgen_->frame()->Pop();
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg2 = cgen()->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 
   ASSERT_ARGCHECK(arg0);
   ASSERT_ARGCHECK(arg1);
@@ -385,8 +346,7 @@
                         Result* arg2,
                         Result* arg3,
                         Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->frame() != NULL);
+  ASSERT(cgen()->frame() != NULL);
 
   // We want to check that non-frame registers at the call site stay in
   // the same registers on the fall-through branch.
@@ -395,15 +355,15 @@
   DECLARE_ARGCHECK_VARS(arg2);
   DECLARE_ARGCHECK_VARS(arg3);
 
-  cgen_->frame()->Push(arg0);
-  cgen_->frame()->Push(arg1);
-  cgen_->frame()->Push(arg2);
-  cgen_->frame()->Push(arg3);
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
+  cgen()->frame()->Push(arg2);
+  cgen()->frame()->Push(arg3);
   DoBranch(cc, hint);
-  *arg3 = cgen_->frame()->Pop();
-  *arg2 = cgen_->frame()->Pop();
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg3 = cgen()->frame()->Pop();
+  *arg2 = cgen()->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 
   ASSERT_ARGCHECK(arg0);
   ASSERT_ARGCHECK(arg1);
@@ -413,15 +373,14 @@
 
 
 void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
-  int count = cgen_->frame()->height() - expected_height_;
+  int count = cgen()->frame()->height() - expected_height_;
   if (count > 0) {
     // We negate and branch here rather than using DoBranch's negate
     // and branch.  This gives us a hook to remove statement state
     // from the frame.
-    JumpTarget fall_through(cgen_);
+    JumpTarget fall_through;
     // Branch to fall through will not negate, because it is a
     // forward-only target.
     fall_through.Branch(NegateCondition(cc), NegateHint(hint));
@@ -429,9 +388,9 @@
     fall_through.Bind();
   } else {
     DECLARE_ARGCHECK_VARS(arg);
-    cgen_->frame()->Push(arg);
+    cgen()->frame()->Push(arg);
     DoBranch(cc, hint);
-    *arg = cgen_->frame()->Pop();
+    *arg = cgen()->frame()->Pop();
     ASSERT_ARGCHECK(arg);
   }
 }
@@ -446,26 +405,22 @@
 
 
 void JumpTarget::Bind(Result* arg, int mergable_elements) {
-  ASSERT(cgen_ != NULL);
-
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->Push(arg);
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg);
   }
   DoBind(mergable_elements);
-  *arg = cgen_->frame()->Pop();
+  *arg = cgen()->frame()->Pop();
 }
 
 
 void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
-  ASSERT(cgen_ != NULL);
-
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->Push(arg0);
-    cgen_->frame()->Push(arg1);
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg0);
+    cgen()->frame()->Push(arg1);
   }
   DoBind(mergable_elements);
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 }
 
 
@@ -473,17 +428,15 @@
                       Result* arg1,
                       Result* arg2,
                       int mergable_elements) {
-  ASSERT(cgen_ != NULL);
-
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->Push(arg0);
-    cgen_->frame()->Push(arg1);
-    cgen_->frame()->Push(arg2);
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg0);
+    cgen()->frame()->Push(arg1);
+    cgen()->frame()->Push(arg2);
   }
   DoBind(mergable_elements);
-  *arg2 = cgen_->frame()->Pop();
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg2 = cgen()->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 }
 
 
@@ -492,19 +445,17 @@
                       Result* arg2,
                       Result* arg3,
                       int mergable_elements) {
-  ASSERT(cgen_ != NULL);
-
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->Push(arg0);
-    cgen_->frame()->Push(arg1);
-    cgen_->frame()->Push(arg2);
-    cgen_->frame()->Push(arg3);
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg0);
+    cgen()->frame()->Push(arg1);
+    cgen()->frame()->Push(arg2);
+    cgen()->frame()->Push(arg3);
   }
   DoBind(mergable_elements);
-  *arg3 = cgen_->frame()->Pop();
-  *arg2 = cgen_->frame()->Pop();
-  *arg1 = cgen_->frame()->Pop();
-  *arg0 = cgen_->frame()->Pop();
+  *arg3 = cgen()->frame()->Pop();
+  *arg2 = cgen()->frame()->Pop();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
 }
 
 
@@ -520,25 +471,20 @@
 // -------------------------------------------------------------------------
 // BreakTarget implementation.
 
-void BreakTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
-  JumpTarget::Initialize(cgen, direction);
-  ASSERT(cgen_->has_valid_frame());
-  expected_height_ = cgen_->frame()->height();
+void BreakTarget::set_direction(Directionality direction) {
+  JumpTarget::set_direction(direction);
+  ASSERT(cgen()->has_valid_frame());
+  expected_height_ = cgen()->frame()->height();
 }
 
 
 void BreakTarget::CopyTo(BreakTarget* destination) {
   ASSERT(destination != NULL);
-  destination->cgen_ = cgen_;
-  destination->masm_ = masm_;
   destination->direction_ = direction_;
-  destination->reaching_frames_.Clear();
-  destination->merge_labels_.Clear();
-  ASSERT(reaching_frames_.length() == merge_labels_.length());
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    destination->reaching_frames_.Add(reaching_frames_[i]);
-    destination->merge_labels_.Add(merge_labels_[i]);
-  }
+  destination->reaching_frames_.Rewind(0);
+  destination->reaching_frames_.AddAll(reaching_frames_);
+  destination->merge_labels_.Rewind(0);
+  destination->merge_labels_.AddAll(merge_labels_);
   destination->entry_frame_ = entry_frame_;
   destination->entry_label_ = entry_label_;
   destination->expected_height_ = expected_height_;
@@ -546,36 +492,33 @@
 
 
 void BreakTarget::Jump() {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
   // Drop leftover statement state from the frame before merging.
-  cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
+  cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
   DoJump();
 }
 
 
 void BreakTarget::Jump(Result* arg) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
   // Drop leftover statement state from the frame before merging.
-  cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
-  cgen_->frame()->Push(arg);
+  cgen()->frame()->ForgetElements(cgen()->frame()->height() - expected_height_);
+  cgen()->frame()->Push(arg);
   DoJump();
 }
 
 
 void BreakTarget::Branch(Condition cc, Hint hint) {
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
+  ASSERT(cgen()->has_valid_frame());
 
-  int count = cgen_->frame()->height() - expected_height_;
+  int count = cgen()->frame()->height() - expected_height_;
   if (count > 0) {
     // We negate and branch here rather than using DoBranch's negate
     // and branch.  This gives us a hook to remove statement state
     // from the frame.
-    JumpTarget fall_through(cgen_);
+    JumpTarget fall_through;
     // Branch to fall through will not negate, because it is a
     // forward-only target.
     fall_through.Branch(NegateCondition(cc), NegateHint(hint));
@@ -589,7 +532,6 @@
 
 void BreakTarget::Bind(int mergable_elements) {
 #ifdef DEBUG
-  ASSERT(cgen_ != NULL);
   // All the forward-reaching frames should have been adjusted at the
   // jumps to this target.
   for (int i = 0; i < reaching_frames_.length(); i++) {
@@ -600,8 +542,9 @@
   // Drop leftover statement state from the frame before merging, even
   // on the fall through.  This is so we can bind the return target
   // with state on the frame.
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
   }
   DoBind(mergable_elements);
 }
@@ -609,7 +552,6 @@
 
 void BreakTarget::Bind(Result* arg, int mergable_elements) {
 #ifdef DEBUG
-  ASSERT(cgen_ != NULL);
   // All the forward-reaching frames should have been adjusted at the
   // jumps to this target.
   for (int i = 0; i < reaching_frames_.length(); i++) {
@@ -620,12 +562,13 @@
   // Drop leftover statement state from the frame before merging, even
   // on the fall through.  This is so we can bind the return target
   // with state on the frame.
-  if (cgen_->has_valid_frame()) {
-    cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
-    cgen_->frame()->Push(arg);
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    cgen()->frame()->ForgetElements(count);
+    cgen()->frame()->Push(arg);
   }
   DoBind(mergable_elements);
-  *arg = cgen_->frame()->Pop();
+  *arg = cgen()->frame()->Pop();
 }
 
 
@@ -644,25 +587,14 @@
 
   // The original's state is reset.
   shadowed->Unuse();
-  ASSERT(cgen_ != NULL);
-  ASSERT(cgen_->has_valid_frame());
-  shadowed->set_expected_height(cgen_->frame()->height());
-
-  // Setting the code generator to null prevents the shadow target from
-  // being used until shadowing stops.
-  cgen_ = NULL;
-  masm_ = NULL;
+  ASSERT(cgen()->has_valid_frame());
+  shadowed->set_expected_height(cgen()->frame()->height());
 }
 
 
 void ShadowTarget::StopShadowing() {
   ASSERT(is_shadowing_);
 
-  // This target does not have a valid code generator yet.
-  cgen_ = other_target_->code_generator();
-  ASSERT(cgen_ != NULL);
-  masm_ = cgen_->masm();
-
   // The states of this target, which was shadowed, and the original
   // target, which was shadowing, are swapped.
   BreakTarget temp;
diff --git a/src/jump-target.h b/src/jump-target.h
index 1cde326..7585faf 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -28,7 +28,8 @@
 #ifndef V8_JUMP_TARGET_H_
 #define V8_JUMP_TARGET_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declarations.
 class FrameElement;
@@ -56,31 +57,34 @@
   // Forward-only jump targets can only be reached by forward CFG edges.
   enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
 
-  // Construct a jump target with a given code generator used to generate
-  // code and to provide access to a current frame.
-  explicit JumpTarget(CodeGenerator* cgen,
-                      Directionality direction = FORWARD_ONLY);
+  // Construct a jump target used to generate code and to provide
+  // access to a current frame.
+  explicit JumpTarget(Directionality direction)
+      : direction_(direction),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
 
-  // Construct a jump target without a code generator.  A code
-  // generator must be supplied before using the jump target as a
-  // label.  This is useful, eg, when break targets are embedded in
-  // AST nodes.
-  JumpTarget();
+  // Construct a jump target.
+  JumpTarget()
+      : direction_(FORWARD_ONLY),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
 
   virtual ~JumpTarget() {}
 
-  // Supply a code generator and directionality to an already
-  // constructed jump target.  This function expects to be given a
-  // non-null code generator, and to be called only when the code
-  // generator is not yet set.
-  virtual void Initialize(CodeGenerator* cgen,
-                          Directionality direction = FORWARD_ONLY);
+  // Set the direction of the jump target.
+  virtual void set_direction(Directionality direction) {
+    direction_ = direction;
+  }
 
   // Treat the jump target as a fresh one.  The state is reset.
   void Unuse();
 
-  // Accessors.
-  CodeGenerator* code_generator() const { return cgen_; }
+  inline CodeGenerator* cgen();
 
   Label* entry_label() { return &entry_label_; }
 
@@ -163,12 +167,6 @@
   }
 
  protected:
-  // The code generator gives access to its current frame.
-  CodeGenerator* cgen_;
-
-  // Used to emit code.
-  MacroAssembler* masm_;
-
   // Directionality flag set at initialization time.
   Directionality direction_;
 
@@ -224,20 +222,13 @@
 
 class BreakTarget : public JumpTarget {
  public:
-  // Construct a break target without a code generator.  A code
-  // generator must be supplied before using the break target as a
-  // label.  This is useful, eg, when break targets are embedded in AST
-  // nodes.
+  // Construct a break target.
   BreakTarget() {}
 
   virtual ~BreakTarget() {}
 
-  // Supply a code generator, expected expression stack height, and
-  // directionality to an already constructed break target.  This
-  // function expects to be given a non-null code generator, and to be
-  // called only when the code generator is not yet set.
-  virtual void Initialize(CodeGenerator* cgen,
-                          Directionality direction = FORWARD_ONLY);
+  // Set the direction of the break target.
+  virtual void set_direction(Directionality direction);
 
   // Copy the state of this break target to the destination.  The
   // lists of forward-reaching frames and merge-point labels are
diff --git a/src/list-inl.h b/src/list-inl.h
index e3d251f..e41db11 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -30,7 +30,8 @@
 
 #include "list.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 template<typename T, class P>
@@ -43,6 +44,17 @@
 }
 
 
+template<typename T, class P>
+void List<T, P>::AddAll(const List<T, P>& other) {
+  int result_length = length_ + other.length_;
+  if (capacity_ < result_length) Resize(result_length);
+  for (int i = 0; i < other.length_; i++) {
+    data_[length_ + i] = other.data_[i];
+  }
+  length_ = result_length;
+}
+
+
 // Use two layers of inlining so that the non-inlined function can
 // use the same implementation as the inlined version.
 template<typename T, class P>
@@ -57,11 +69,18 @@
   // Grow the list capacity by 50%, but make sure to let it grow
   // even when the capacity is zero (possible initial case).
   int new_capacity = 1 + capacity_ + (capacity_ >> 1);
+  // Since the element reference could be an element of the list, copy
+  // it out of the old backing storage before resizing.
+  T temp = element;
+  Resize(new_capacity);
+  data_[length_++] = temp;
+}
+
+
+template<typename T, class P>
+void List<T, P>::Resize(int new_capacity) {
   T* new_data = List<T, P>::NewData(new_capacity);
   memcpy(new_data, data_, capacity_ * sizeof(T));
-  // Since the element reference could be an element of the list,
-  // assign it to the new backing store before deleting the old.
-  new_data[length_++] = element;
   List<T, P>::DeleteData(data_);
   data_ = new_data;
   capacity_ = new_capacity;
diff --git a/src/list.h b/src/list.h
index 92d23ea..b6c06d8 100644
--- a/src/list.h
+++ b/src/list.h
@@ -28,7 +28,8 @@
 #ifndef V8_LIST_H_
 #define V8_LIST_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // ----------------------------------------------------------------------------
@@ -77,6 +78,9 @@
   // expanding the list if necessary.
   void Add(const T& element);
 
+  // Add all the elements from the argument list to this list.
+  void AddAll(const List<T, P>& other);
+
   // Added 'count' elements with the value 'value' and returns a
   // vector that allows access to the elements.  The vector is valid
   // until the next change is made to this list.
@@ -126,6 +130,9 @@
   // non-inlined versions of ResizeAdd.
   void ResizeAddInternal(const T& element);
 
+  // Resize the list.
+  void Resize(int new_capacity);
+
   DISALLOW_COPY_AND_ASSIGN(List);
 };
 
diff --git a/src/log.cc b/src/log.cc
index 08f68fd..59018a1 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -36,7 +36,8 @@
 #include "serialize.h"
 #include "string-stream.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
@@ -188,7 +189,7 @@
 
   void SetProfiler(Profiler* profiler) {
     profiler_ = profiler;
-    if (!IsActive()) Start();
+    if (!FLAG_prof_lazy && !IsActive()) Start();
   }
 
   void ClearProfiler() {
@@ -267,6 +268,8 @@
   // the thread to terminate.
   running_ = false;
   TickSample sample;
+  // Reset 'paused_' flag, otherwise semaphore may not be signalled.
+  resume();
   Insert(&sample);
   Join();
 
@@ -301,30 +304,37 @@
   // Frees all resources acquired in Open... functions.
   static void Close();
 
-  // See description in v8.h.
+  // See description in include/v8.h.
   static int GetLogLines(int from_pos, char* dest_buf, int max_size);
 
-  static bool is_enabled() { return output_.handle != NULL; }
+  // Returns whether logging is enabled.
+  static bool IsEnabled() {
+    return output_handle_ != NULL || output_buffer_ != NULL;
+  }
 
-  typedef int (*WritePtr)(const char* msg, int length);
  private:
+  typedef int (*WritePtr)(const char* msg, int length);
+
+  // Initialization function called from Open... functions.
   static void Init();
 
   // Write functions assume that mutex_ is acquired by the caller.
   static WritePtr Write;
 
+  // Implementation of writing to a log file.
   static int WriteToFile(const char* msg, int length) {
-    ASSERT(output_.handle != NULL);
-    int rv = fwrite(msg, 1, length, output_.handle);
+    ASSERT(output_handle_ != NULL);
+    int rv = fwrite(msg, 1, length, output_handle_);
     ASSERT(length == rv);
     return rv;
   }
 
+  // Implementation of writing to a memory buffer.
   static int WriteToMemory(const char* msg, int length) {
-    ASSERT(output_.buffer != NULL);
-    ASSERT(output_buffer_write_pos_ >= output_.buffer);
+    ASSERT(output_buffer_ != NULL);
+    ASSERT(output_buffer_write_pos_ >= output_buffer_);
     if (output_buffer_write_pos_ + length
-        <= output_.buffer + kOutputBufferSize) {
+        <= output_buffer_ + kOutputBufferSize) {
       memcpy(output_buffer_write_pos_, msg, length);
       output_buffer_write_pos_ += length;
       return length;
@@ -334,14 +344,14 @@
     }
   }
 
-  // When logging is active, output_ refers the file or memory buffer
-  // events are written to.
-  // mutex_ should be acquired before using output_.
-  union Output {
-    FILE* handle;
-    char* buffer;
-  };
-  static Output output_;
+  // When logging is active, either output_handle_ or output_buffer_ is used
+  // to store a pointer to log destination. If logging was opened via OpenStdout
+  // or OpenFile, then output_handle_ is used. If logging was opened
+  // via OpenMemoryBuffer, then output_buffer_ is used.
+  // mutex_ should be acquired before using output_handle_ or output_buffer_.
+  static FILE* output_handle_;
+
+  static char* output_buffer_;
 
   // mutex_ is a Mutex used for enforcing exclusive
   // access to the formatting buffer and the log file or log memory buffer.
@@ -365,7 +375,8 @@
 
 
 Log::WritePtr Log::Write = NULL;
-Log::Output Log::output_ = {NULL};
+FILE* Log::output_handle_ = NULL;
+char* Log::output_buffer_ = NULL;
 Mutex* Log::mutex_ = NULL;
 char* Log::output_buffer_write_pos_ = NULL;
 char* Log::message_buffer_ = NULL;
@@ -378,25 +389,25 @@
 
 
 void Log::OpenStdout() {
-  ASSERT(output_.handle == NULL);
-  output_.handle = stdout;
+  ASSERT(!IsEnabled());
+  output_handle_ = stdout;
   Write = WriteToFile;
   Init();
 }
 
 
 void Log::OpenFile(const char* name) {
-  ASSERT(output_.handle == NULL);
-  output_.handle = OS::FOpen(name, OS::LogFileOpenMode);
+  ASSERT(!IsEnabled());
+  output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
   Write = WriteToFile;
   Init();
 }
 
 
 void Log::OpenMemoryBuffer() {
-  ASSERT(output_.buffer == NULL);
-  output_.buffer = NewArray<char>(kOutputBufferSize);
-  output_buffer_write_pos_ = output_.buffer;
+  ASSERT(!IsEnabled());
+  output_buffer_ = NewArray<char>(kOutputBufferSize);
+  output_buffer_write_pos_ = output_buffer_;
   Write = WriteToMemory;
   Init();
 }
@@ -404,11 +415,11 @@
 
 void Log::Close() {
   if (Write == WriteToFile) {
-    fclose(output_.handle);
-    output_.handle = NULL;
+    fclose(output_handle_);
+    output_handle_ = NULL;
   } else if (Write == WriteToMemory) {
-    DeleteArray(output_.buffer);
-    output_.buffer = NULL;
+    DeleteArray(output_buffer_);
+    output_buffer_ = NULL;
   } else {
     ASSERT(Write == NULL);
   }
@@ -424,15 +435,15 @@
 
 int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
   if (Write != WriteToMemory) return 0;
-  ASSERT(output_.buffer != NULL);
-  ASSERT(output_buffer_write_pos_ >= output_.buffer);
+  ASSERT(output_buffer_ != NULL);
+  ASSERT(output_buffer_write_pos_ >= output_buffer_);
   ASSERT(from_pos >= 0);
   ASSERT(max_size >= 0);
   int actual_size = max_size;
-  char* buffer_read_pos = output_.buffer + from_pos;
+  char* buffer_read_pos = output_buffer_ + from_pos;
   ScopedLock sl(mutex_);
   if (actual_size == 0
-      || output_buffer_write_pos_ == output_.buffer
+      || output_buffer_write_pos_ == output_buffer_
       || buffer_read_pos >= output_buffer_write_pos_) {
     // No data requested or can be returned.
     return 0;
@@ -584,8 +595,8 @@
 SlidingStateWindow* Logger::sliding_state_window_ = NULL;
 
 
-bool Logger::is_enabled() {
-  return Log::is_enabled();
+bool Logger::IsEnabled() {
+  return Log::IsEnabled();
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
@@ -593,7 +604,7 @@
 
 void Logger::Preamble(const char* content) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.WriteCStringToLogFile(content);
 #endif
@@ -609,7 +620,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedStringEvent(const char* name, const char* value) {
-  if (!Log::is_enabled()) return;
+  if (!Log::IsEnabled()) return;
   LogMessageBuilder msg;
   msg.Append("%s,\"%s\"\n", name, value);
   msg.WriteToLogFile();
@@ -619,7 +630,7 @@
 
 void Logger::IntEvent(const char* name, int value) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("%s,%d\n", name, value);
   msg.WriteToLogFile();
@@ -629,7 +640,7 @@
 
 void Logger::HandleEvent(const char* name, Object** location) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_handles) return;
+  if (!Log::IsEnabled() || !FLAG_log_handles) return;
   LogMessageBuilder msg;
   msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
   msg.WriteToLogFile();
@@ -642,7 +653,7 @@
 // caller's responsibility to ensure that log is enabled and that
 // FLAG_log_api is true.
 void Logger::ApiEvent(const char* format, ...) {
-  ASSERT(Log::is_enabled() && FLAG_log_api);
+  ASSERT(Log::IsEnabled() && FLAG_log_api);
   LogMessageBuilder msg;
   va_list ap;
   va_start(ap, format);
@@ -655,7 +666,7 @@
 
 void Logger::ApiNamedSecurityCheck(Object* key) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   if (key->IsString()) {
     SmartPointer<char> str =
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -673,7 +684,7 @@
                                 unsigned start,
                                 unsigned end) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_prof) return;
+  if (!Log::IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
   msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
              start, end);
@@ -686,7 +697,7 @@
                                 unsigned start,
                                 unsigned end) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_prof) return;
+  if (!Log::IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
   msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
              start, end);
@@ -741,7 +752,7 @@
 
 void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_regexp) return;
+  if (!Log::IsEnabled() || !FLAG_log_regexp) return;
   LogMessageBuilder msg;
   msg.Append("regexp-compile,");
   LogRegExpSource(regexp);
@@ -753,7 +764,7 @@
 
 void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_runtime) return;
+  if (!Log::IsEnabled() || !FLAG_log_runtime) return;
   HandleScope scope;
   LogMessageBuilder msg;
   for (int i = 0; i < format.length(); i++) {
@@ -794,7 +805,7 @@
 
 void Logger::ApiIndexedSecurityCheck(uint32_t index) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   ApiEvent("api,check-security,%u\n", index);
 #endif
 }
@@ -805,7 +816,7 @@
                                     Object* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   ASSERT(name->IsString());
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -819,7 +830,7 @@
                                       JSObject* holder,
                                       uint32_t index) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -829,7 +840,7 @@
 
 void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = object->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -840,7 +851,7 @@
 
 void Logger::ApiEntryCall(const char* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_api) return;
+  if (!Log::IsEnabled() || !FLAG_log_api) return;
   Logger::ApiEvent("api,%s\n", name);
 #endif
 }
@@ -848,7 +859,7 @@
 
 void Logger::NewEvent(const char* name, void* object, size_t size) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
              static_cast<unsigned int>(size));
@@ -859,7 +870,7 @@
 
 void Logger::DeleteEvent(const char* name, void* object) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
   msg.WriteToLogFile();
@@ -869,7 +880,7 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", tag, code->address(),
              code->ExecutableSize());
@@ -888,7 +899,7 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -902,7 +913,7 @@
 void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
                              String* source, int line) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -919,7 +930,7 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"args_count: %d\"\n", tag,
              code->address(),
@@ -932,7 +943,7 @@
 
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", "RegExp",
              code->address(),
@@ -946,7 +957,7 @@
 
 void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-allocate,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n",
              code->address(),
@@ -958,7 +969,7 @@
 
 void Logger::CodeMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-move,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", from, to);
   msg.WriteToLogFile();
@@ -968,7 +979,7 @@
 
 void Logger::CodeDeleteEvent(Address from) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_code) return;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-delete,0x%" V8PRIxPTR "\n", from);
   msg.WriteToLogFile();
@@ -978,7 +989,7 @@
 
 void Logger::ResourceEvent(const char* name, const char* tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("%s,%s,", name, tag);
 
@@ -996,7 +1007,7 @@
 
 void Logger::SuspectReadEvent(String* name, Object* obj) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_suspect) return;
+  if (!Log::IsEnabled() || !FLAG_log_suspect) return;
   LogMessageBuilder msg;
   String* class_name = obj->IsJSObject()
                        ? JSObject::cast(obj)->class_name()
@@ -1015,7 +1026,7 @@
 
 void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_gc) return;
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
   msg.WriteToLogFile();
@@ -1025,7 +1036,7 @@
 
 void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_gc) return;
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
   msg.WriteToLogFile();
@@ -1035,7 +1046,7 @@
 
 void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log_gc) return;
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
   msg.WriteToLogFile();
@@ -1045,7 +1056,7 @@
 
 void Logger::DebugTag(const char* call_site_tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("debug-tag,%s\n", call_site_tag);
   msg.WriteToLogFile();
@@ -1055,7 +1066,7 @@
 
 void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::is_enabled() || !FLAG_log) return;
+  if (!Log::IsEnabled() || !FLAG_log) return;
   StringBuilder s(parameter.length() + 1);
   for (int i = 0; i < parameter.length(); ++i) {
     s.AddCharacter(static_cast<char>(parameter[i]));
@@ -1074,7 +1085,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::TickEvent(TickSample* sample, bool overflow) {
-  if (!Log::is_enabled() || !FLAG_prof) return;
+  if (!Log::IsEnabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
   msg.Append("tick,0x%" V8PRIxPTR ",0x%" V8PRIxPTR ",%d",
              sample->pc, sample->sp, static_cast<int>(sample->state));
@@ -1096,18 +1107,97 @@
 
 void Logger::PauseProfiler() {
   profiler_->pause();
+  if (FLAG_prof_lazy) {
+    if (!FLAG_sliding_state_window) ticker_->Stop();
+    FLAG_log_code = false;
+    LOG(UncheckedStringEvent("profiler", "pause"));
+  }
 }
 
 
 void Logger::ResumeProfiler() {
+  if (FLAG_prof_lazy) {
+    LOG(UncheckedStringEvent("profiler", "resume"));
+    FLAG_log_code = true;
+    LogCompiledFunctions();
+    if (!FLAG_sliding_state_window) ticker_->Start();
+  }
   profiler_->resume();
 }
 
 
+bool Logger::IsProfilerSamplerActive() {
+  return ticker_->IsActive();
+}
+
+
 int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
   return Log::GetLogLines(from_pos, dest_buf, max_size);
 }
 
+
+void Logger::LogCompiledFunctions() {
+  HandleScope scope;
+  Handle<SharedFunctionInfo>* sfis = NULL;
+  int compiled_funcs_count = 0;
+
+  {
+    AssertNoAllocation no_alloc;
+
+    HeapIterator iterator;
+    while (iterator.has_next()) {
+      HeapObject* obj = iterator.next();
+      ASSERT(obj != NULL);
+      if (obj->IsSharedFunctionInfo()
+          && SharedFunctionInfo::cast(obj)->is_compiled()) {
+        ++compiled_funcs_count;
+      }
+    }
+
+    sfis = NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
+    iterator.reset();
+
+    int i = 0;
+    while (iterator.has_next()) {
+      HeapObject* obj = iterator.next();
+      ASSERT(obj != NULL);
+      if (obj->IsSharedFunctionInfo()
+          && SharedFunctionInfo::cast(obj)->is_compiled()) {
+        sfis[i++] = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+      }
+    }
+  }
+
+  // During iteration, there can be heap allocation due to
+  // GetScriptLineNumber call.
+  for (int i = 0; i < compiled_funcs_count; ++i) {
+    Handle<SharedFunctionInfo> shared = sfis[i];
+    Handle<String> name(String::cast(shared->name()));
+    Handle<String> func_name(name->length() > 0 ?
+                             *name : shared->inferred_name());
+    if (shared->script()->IsScript()) {
+      Handle<Script> script(Script::cast(shared->script()));
+      if (script->name()->IsString()) {
+        Handle<String> script_name(String::cast(script->name()));
+        int line_num = GetScriptLineNumber(script, shared->start_position());
+        if (line_num > 0) {
+          line_num += script->line_offset()->value() + 1;
+          LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name,
+                              *script_name, line_num));
+        } else {
+          // Can't distinguish enum and script here, so always use Script.
+          LOG(CodeCreateEvent("Script", shared->code(), *script_name));
+        }
+        continue;
+      }
+    }
+    // If no script or script has no name.
+    LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name));
+  }
+
+  DeleteArray(sfis);
+}
+
 #endif
 
 
@@ -1127,9 +1217,15 @@
   // --prof implies --log-code.
   if (FLAG_prof) FLAG_log_code = true;
 
+  // --prof_lazy controls --log-code, implies --noprof_auto.
+  if (FLAG_prof_lazy) {
+    FLAG_log_code = false;
+    FLAG_prof_auto = false;
+  }
+
   bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
       || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
-      || FLAG_log_regexp || FLAG_log_state_changes;
+      || FLAG_log_regexp || FLAG_log_state_changes || FLAG_prof_lazy;
 
   // If we're logging anything, we need to open the log file.
   if (open_log_file) {
@@ -1214,8 +1310,10 @@
   }
 
   delete sliding_state_window_;
+  sliding_state_window_ = NULL;
 
   delete ticker_;
+  ticker_ = NULL;
 
   Log::Close();
 #endif
@@ -1266,7 +1364,7 @@
 VMState::VMState(StateTag state) {
 #if !defined(ENABLE_HEAP_PROTECTION)
   // When not protecting the heap, there is no difference between
-  // EXTERNAL and OTHER.  As an optimizatin in that case, we will not
+  // EXTERNAL and OTHER.  As an optimization in that case, we will not
   // perform EXTERNAL->OTHER transitions through the API.  We thus
   // compress the two states into one.
   if (state == EXTERNAL) state = OTHER;
diff --git a/src/log.h b/src/log.h
index 5f3c188..56cf93d 100644
--- a/src/log.h
+++ b/src/log.h
@@ -28,7 +28,8 @@
 #ifndef V8_LOG_H_
 #define V8_LOG_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Logger is used for collecting logging information from V8 during
 // execution. The result is dumped to a file.
@@ -75,7 +76,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 #define LOG(Call)                           \
   do {                                      \
-    if (v8::internal::Logger::is_enabled()) \
+    if (v8::internal::Logger::IsEnabled()) \
       v8::internal::Logger::Call;           \
   } while (false)
 #else
@@ -201,7 +202,7 @@
     return current_state_ ? current_state_->state() : OTHER;
   }
 
-  static bool is_enabled();
+  static bool IsEnabled();
 
   // Pause/Resume collection of profiling data.
   // When data collection is paused, Tick events are discarded until
@@ -214,6 +215,9 @@
   // retrieve previously written messages. See v8.h.
   static int GetLogLines(int from_pos, char* dest_buf, int max_size);
 
+  // Logs all compiled functions found in the heap.
+  static void LogCompiledFunctions();
+
  private:
 
   // Emits the source code of a regexp. Used by regexp events.
@@ -227,6 +231,9 @@
   // Logs a StringEvent regardless of whether FLAG_log is true.
   static void UncheckedStringEvent(const char* name, const char* value);
 
+  // Returns whether profiler's sampler is active.
+  static bool IsProfilerSamplerActive();
+
   // The sampler used by the profiler and the sliding state window.
   static Ticker* ticker_;
 
@@ -252,6 +259,8 @@
   friend class Profiler;
   friend class SlidingStateWindow;
   friend class VMState;
+
+  friend class LoggerTestHelper;
 #else
   static bool is_enabled() { return false; }
 #endif
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e09e15f..56e4ea6 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -33,7 +33,8 @@
 #include "mark-compact.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // MarkCompactCollector
diff --git a/src/mark-compact.h b/src/mark-compact.h
index bfa2c3c..d7ad630 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -28,7 +28,8 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Callback function, returns whether an object is alive. The heap size
 // of the object is returned in size. It optionally updates the offset
diff --git a/src/memory.h b/src/memory.h
index 2397bc6..86a9188 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -28,7 +28,8 @@
 #ifndef V8_MEMORY_H_
 #define V8_MEMORY_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Memory provides an interface to 'raw' memory. It encapsulates the casts
 // that typically are needed when incompatible pointer types are used.
diff --git a/src/messages.cc b/src/messages.cc
index ca0ce2a..a3fffcb 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -33,7 +33,8 @@
 #include "spaces-inl.h"
 #include "top.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // If no message listeners have been registered this one is called
diff --git a/src/messages.h b/src/messages.h
index 1ff10aa..80ce8eb 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -36,7 +36,8 @@
 #include "handles-inl.h"
 
 // Forward declaration of MessageLocation.
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 class MessageLocation;
 } }  // namespace v8::internal
 
@@ -57,7 +58,8 @@
 };
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 struct Language;
 class SourceInfo;
diff --git a/src/natives.h b/src/natives.h
index 3eb8090..fdfd213 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -28,7 +28,8 @@
 #ifndef V8_NATIVES_H_
 #define V8_NATIVES_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 typedef bool (*NativeSourceCallback)(Vector<const char> name,
                                      Vector<const char> source,
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index e172014..ba07af7 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -32,7 +32,8 @@
 #include "macro-assembler.h"
 #include "jsregexp.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef DEBUG
 
@@ -941,6 +942,8 @@
   column_offset()->ShortPrint();
   PrintF("\n - type: ");
   type()->ShortPrint();
+  PrintF("\n - id: ");
+  id()->ShortPrint();
   PrintF("\n");
 }
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 7821178..475b57b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -40,7 +40,8 @@
 #include "conversions-inl.h"
 #include "property.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 PropertyDetails::PropertyDetails(Smi* smi) {
   value_ = smi->value();
@@ -764,9 +765,11 @@
 
 Failure* Failure::Construct(Type type, int value) {
   int info = (value << kFailureTypeTagSize) | type;
+  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
+  // happen to be identical at the moment.
   ASSERT(Smi::IsValid(info));  // Same validation check as in Smi
   return reinterpret_cast<Failure*>(
-      static_cast<intptr_t>((info << kFailureTagSize) | kFailureTag));
+      (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
 }
 
 
@@ -1794,7 +1797,7 @@
 
 
 void Map::set_instance_size(int value) {
-  ASSERT((value & ~(kPointerSize - 1)) == value);
+  ASSERT_EQ(0, value & (kPointerSize - 1));
   value >>= kPointerSizeLog2;
   ASSERT(0 <= value && value < 256);
   WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
@@ -1895,6 +1898,11 @@
 }
 
 
+InLoopFlag Code::ic_in_loop() {
+  return ExtractICInLoopFromFlags(flags());
+}
+
+
 InlineCacheState Code::ic_state() {
   InlineCacheState result = ExtractICStateFromFlags(flags());
   // Only allow uninitialized or debugger states for non-IC code
@@ -1941,11 +1949,13 @@
 
 
 Code::Flags Code::ComputeFlags(Kind kind,
+                               InLoopFlag in_loop,
                                InlineCacheState ic_state,
                                PropertyType type,
                                int argc) {
   // Compute the bit mask.
   int bits = kind << kFlagsKindShift;
+  if (in_loop) bits |= kFlagsICInLoopMask;
   bits |= ic_state << kFlagsICStateShift;
   bits |= type << kFlagsTypeShift;
   bits |= argc << kFlagsArgumentsCountShift;
@@ -1953,6 +1963,7 @@
   Flags result = static_cast<Flags>(bits);
   ASSERT(ExtractKindFromFlags(result) == kind);
   ASSERT(ExtractICStateFromFlags(result) == ic_state);
+  ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
   ASSERT(ExtractTypeFromFlags(result) == type);
   ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
   return result;
@@ -1961,8 +1972,9 @@
 
 Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
                                           PropertyType type,
+                                          InLoopFlag in_loop,
                                           int argc) {
-  return ComputeFlags(kind, MONOMORPHIC, type, argc);
+  return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
 }
 
 
@@ -1978,6 +1990,12 @@
 }
 
 
+InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
+  int bits = (flags & kFlagsICInLoopMask);
+  return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
+}
+
+
 PropertyType Code::ExtractTypeFromFlags(Flags flags) {
   int bits = (flags & kFlagsTypeMask) >> kFlagsTypeShift;
   return static_cast<PropertyType>(bits);
diff --git a/src/objects.cc b/src/objects.cc
index cb8acf7..a3526eb 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -41,7 +41,8 @@
 #include "disassembler.h"
 #endif
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Getters and setters are stored in a fixed array property.  These are
 // constants for their indices.
@@ -536,6 +537,9 @@
 
 Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
   ASSERT((space & ~kSpaceTagMask) == 0);
+  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
+  // happen to be identical at the moment.
+
   int requested = requested_bytes >> kObjectAlignmentBits;
   int value = (requested << kSpaceTagSize) | space;
   // We can't very well allocate a heap number in this situation, and if the
@@ -4856,7 +4860,6 @@
 const char* Code::ICState2String(InlineCacheState state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
-    case UNINITIALIZED_IN_LOOP: return "UNINITIALIZED_IN_LOOP";
     case PREMONOMORPHIC: return "PREMONOMORPHIC";
     case MONOMORPHIC: return "MONOMORPHIC";
     case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
@@ -5936,20 +5939,6 @@
 }
 
 
-// Thomas Wang, Integer Hash Functions.
-// http://www.concentric.net/~Ttwang/tech/inthash.htm
-static uint32_t ComputeIntegerHash(uint32_t key) {
-  uint32_t hash = key;
-  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
-  hash = hash ^ (hash >> 12);
-  hash = hash + (hash << 2);
-  hash = hash ^ (hash >> 4);
-  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
-  hash = hash ^ (hash >> 16);
-  return hash;
-}
-
-
 // The NumberKey uses carries the uint32_t as key.
 // This avoids allocation in HasProperty.
 class NumberKey : public HashTableKey {
diff --git a/src/objects.h b/src/objects.h
index 2619c89..6bdddeb 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,7 +50,6 @@
 //           - JSBuiltinsObject
 //         - JSGlobalProxy
 //         - JSValue
-//         - Script
 //       - Array
 //         - ByteArray
 //         - FixedArray
@@ -83,8 +82,10 @@
 //         - AccessCheckInfo
 //         - InterceptorInfo
 //         - CallHandlerInfo
-//         - FunctionTemplateInfo
-//         - ObjectTemplateInfo
+//         - TemplateInfo
+//           - FunctionTemplateInfo
+//           - ObjectTemplateInfo
+//         - Script
 //         - SignatureInfo
 //         - TypeSwitchInfo
 //         - DebugInfo
@@ -108,7 +109,8 @@
   // a non-existent property.
 };
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // PropertyDetails captures type and attributes for a property.
@@ -295,12 +297,14 @@
 // Since string types are not consecutive, this macro is used to
 // iterate over them.
 #define STRING_TYPE_LIST(V)                                                    \
-  V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, short_symbol)            \
-  V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, medium_symbol)          \
-  V(LONG_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, long_symbol)              \
-  V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, short_ascii_symbol)  \
-  V(MEDIUM_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_symbol)\
-  V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, long_ascii_symbol)    \
+  V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, short_symbol)           \
+  V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, medium_symbol)         \
+  V(LONG_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, long_symbol)             \
+  V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, short_ascii_symbol) \
+  V(MEDIUM_ASCII_SYMBOL_TYPE,                                                  \
+    SeqAsciiString::kAlignedSize,                                              \
+    medium_ascii_symbol)                                                       \
+  V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, long_ascii_symbol)   \
   V(SHORT_CONS_SYMBOL_TYPE, ConsString::kSize, short_cons_symbol)              \
   V(MEDIUM_CONS_SYMBOL_TYPE, ConsString::kSize, medium_cons_symbol)            \
   V(LONG_CONS_SYMBOL_TYPE, ConsString::kSize, long_cons_symbol)                \
@@ -337,12 +341,14 @@
   V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE,                                           \
     ExternalAsciiString::kSize,                                                \
     long_external_ascii_symbol)                                                \
-  V(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize, short_string)            \
-  V(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize, medium_string)          \
-  V(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize, long_string)              \
-  V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, short_ascii_string)  \
-  V(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_string)\
-  V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, long_ascii_string)    \
+  V(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize, short_string)           \
+  V(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize, medium_string)         \
+  V(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize, long_string)             \
+  V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, short_ascii_string) \
+  V(MEDIUM_ASCII_STRING_TYPE,                                                  \
+    SeqAsciiString::kAlignedSize,                                              \
+    medium_ascii_string)                                                       \
+  V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, long_ascii_string)   \
   V(SHORT_CONS_STRING_TYPE, ConsString::kSize, short_cons_string)              \
   V(MEDIUM_CONS_STRING_TYPE, ConsString::kSize, medium_cons_string)            \
   V(LONG_CONS_STRING_TYPE, ConsString::kSize, long_cons_string)                \
@@ -771,8 +777,10 @@
 
 
 // Smi represents integer Numbers that can be stored in 31 bits.
+// TODO(X64): Increase to 53 bits?
 // Smis are immediate which means they are NOT allocated in the heap.
 // The this pointer has the following format: [31 bit signed int] 0
+// TODO(X64): 31 bits signed int sign-extended to 63 bits.
 // Smi stands for small integer.
 class Smi: public Object {
  public:
@@ -1550,6 +1558,7 @@
   // Layout descriptor.
   static const int kLengthOffset = HeapObject::kHeaderSize;
   static const int kHeaderSize = kLengthOffset + kIntSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
@@ -1596,6 +1605,9 @@
   // Casting.
   static inline FixedArray* cast(Object* obj);
 
+  // Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
+  static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
+
   // Dispatched behavior.
   int FixedArraySize() { return SizeFor(length()); }
   void FixedArrayIterateBody(ObjectVisitor* v);
@@ -2147,7 +2159,7 @@
   inline int get_int(int index);
 
   static int SizeFor(int length) {
-    return kHeaderSize + OBJECT_SIZE_ALIGN(length);
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length);
   }
   // We use byte arrays for free blocks in the heap.  Given a desired size in
   // bytes that is a multiple of the word size and big enough to hold a byte
@@ -2241,9 +2253,10 @@
 
   // [flags]: Access to specific code flags.
   inline Kind kind();
-  inline InlineCacheState ic_state();  // only valid for IC stubs
-  inline PropertyType type();  // only valid for monomorphic IC stubs
-  inline int arguments_count();  // only valid for call IC stubs
+  inline InlineCacheState ic_state();  // Only valid for IC stubs.
+  inline InLoopFlag ic_in_loop();  // Only valid for IC stubs.
+  inline PropertyType type();  // Only valid for monomorphic IC stubs.
+  inline int arguments_count();  // Only valid for call IC stubs.
 
   // Testers for IC stub kinds.
   inline bool is_inline_cache_stub();
@@ -2265,16 +2278,20 @@
 
   // Flags operations.
   static inline Flags ComputeFlags(Kind kind,
+                                   InLoopFlag in_loop = NOT_IN_LOOP,
                                    InlineCacheState ic_state = UNINITIALIZED,
                                    PropertyType type = NORMAL,
                                    int argc = -1);
 
-  static inline Flags ComputeMonomorphicFlags(Kind kind,
-                                              PropertyType type,
-                                              int argc = -1);
+  static inline Flags ComputeMonomorphicFlags(
+      Kind kind,
+      PropertyType type,
+      InLoopFlag in_loop = NOT_IN_LOOP,
+      int argc = -1);
 
   static inline Kind ExtractKindFromFlags(Flags flags);
   static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
+  static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
   static inline PropertyType ExtractTypeFromFlags(Flags flags);
   static inline int ExtractArgumentsCountFromFlags(Flags flags);
   static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -2344,6 +2361,9 @@
   void CodePrint();
   void CodeVerify();
 #endif
+  // Code entry points are aligned to 32 bytes.
+  static const int kCodeAlignment = 32;
+  static const int kCodeAlignmentMask = kCodeAlignment - 1;
 
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
@@ -2351,14 +2371,11 @@
   static const int kSInfoSizeOffset = kRelocationSizeOffset + kIntSize;
   static const int kFlagsOffset = kSInfoSizeOffset + kIntSize;
   static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
-  // Add filler objects to align the instruction start following right after
+  // Add padding to align the instruction start following right after
   // the Code object header.
-  static const int kFiller6Offset = kKindSpecificFlagsOffset + kIntSize;
-  static const int kFiller7Offset = kFiller6Offset + kIntSize;
-  static const int kHeaderSize = kFiller7Offset + kIntSize;
-
-  // Code entry points are aligned to 32 bytes.
-  static const int kCodeAlignment = 32;
+  static const int kHeaderSize =
+      (kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) &
+          ~kCodeAlignmentMask;
 
   // Byte offsets within kKindSpecificFlagsOffset.
   static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
@@ -2366,14 +2383,19 @@
 
   // Flags layout.
   static const int kFlagsICStateShift        = 0;
-  static const int kFlagsKindShift           = 3;
-  static const int kFlagsTypeShift           = 6;
-  static const int kFlagsArgumentsCountShift = 9;
+  static const int kFlagsICInLoopShift       = 3;
+  static const int kFlagsKindShift           = 4;
+  static const int kFlagsTypeShift           = 7;
+  static const int kFlagsArgumentsCountShift = 10;
 
-  static const int kFlagsICStateMask        = 0x00000007;  // 000000111
-  static const int kFlagsKindMask           = 0x00000038;  // 000111000
-  static const int kFlagsTypeMask           = 0x000001C0;  // 111000000
-  static const int kFlagsArgumentsCountMask = 0xFFFFFE00;
+  static const int kFlagsICStateMask        = 0x00000007;  // 0000000111
+  static const int kFlagsICInLoopMask       = 0x00000008;  // 0000001000
+  static const int kFlagsKindMask           = 0x00000070;  // 0001110000
+  static const int kFlagsTypeMask           = 0x00000380;  // 1110000000
+  static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+
+  static const int kFlagsNotUsedInLookup =
+      (kFlagsICInLoopMask | kFlagsTypeMask);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -2567,7 +2589,7 @@
   static const int kInstanceDescriptorsOffset =
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
-  static const int kSize = kCodeCacheOffset + kIntSize;
+  static const int kSize = kCodeCacheOffset + kPointerSize;
 
   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -2776,21 +2798,23 @@
   static const int kDontAdaptArgumentsSentinel = -1;
 
   // Layout description.
+  // (An even number of integers has a size that is a multiple of a pointer.)
   static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kCodeOffset = kNameOffset + kPointerSize;
   static const int kLengthOffset = kCodeOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kIntSize;
-  static const int kInstanceClassNameOffset =
+  static const int kStartPositionAndTypeOffset =
       kExpectedNofPropertiesOffset + kIntSize;
+  static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
+  static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+  static const int kInstanceClassNameOffset =
+      kFunctionTokenPositionOffset + kIntSize;
   static const int kExternalReferenceDataOffset =
       kInstanceClassNameOffset + kPointerSize;
   static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
-  static const int kStartPositionAndTypeOffset = kScriptOffset + kPointerSize;
-  static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
-  static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
-  static const int kDebugInfoOffset = kFunctionTokenPositionOffset + kIntSize;
+  static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
   static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
   static const int kSize = kInferredNameOffset + kPointerSize;
 
@@ -3100,7 +3124,7 @@
 #endif
 
   static const int kDataOffset = JSObject::kHeaderSize;
-  static const int kSize = kDataOffset + kIntSize;
+  static const int kSize = kDataOffset + kPointerSize;
 
   // Indices in the data array.
   static const int kTagIndex = 0;
@@ -3365,6 +3389,7 @@
   // Layout description.
   static const int kLengthOffset = HeapObject::kHeaderSize;
   static const int kSize = kLengthOffset + kIntSize;
+  // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
 
   // Limits on sizes of different types of strings.
   static const int kMaxShortStringSize = 63;
@@ -3513,11 +3538,12 @@
 
   // Computes the size for an AsciiString instance of a given length.
   static int SizeFor(int length) {
-    return kHeaderSize + OBJECT_SIZE_ALIGN(length * kCharSize);
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
   }
 
   // Layout description.
   static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
 
   // Support for StringInputBuffer.
   inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@@ -3558,11 +3584,12 @@
 
   // Computes the size for a TwoByteString instance of a given length.
   static int SizeFor(int length) {
-    return kHeaderSize + OBJECT_SIZE_ALIGN(length * kShortSize);
+    return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
   }
 
   // Layout description.
   static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
 
   // Support for StringInputBuffer.
   inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@@ -3612,7 +3639,7 @@
   void ConsStringIterateBody(ObjectVisitor* v);
 
   // Layout description.
-  static const int kFirstOffset = String::kSize;
+  static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
   static const int kSecondOffset = kFirstOffset + kPointerSize;
   static const int kSize = kSecondOffset + kPointerSize;
 
@@ -3656,9 +3683,18 @@
   void SlicedStringIterateBody(ObjectVisitor* v);
 
   // Layout description
+#if V8_HOST_ARCH_64_BIT
+  // Optimizations expect buffer to be located at same offset as a ConsString's
+  // first substring. In 64 bit mode we have room for the size before the
+  // buffer.
+  static const int kStartOffset = String::kSize;
+  static const int kBufferOffset = kStartOffset + kIntSize;
+  static const int kSize = kBufferOffset + kPointerSize;
+#else
   static const int kBufferOffset = String::kSize;
   static const int kStartOffset = kBufferOffset + kPointerSize;
   static const int kSize = kStartOffset + kIntSize;
+#endif
 
   // Support for StringInputBuffer.
   inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
@@ -3688,7 +3724,7 @@
   static inline ExternalString* cast(Object* obj);
 
   // Layout description.
-  static const int kResourceOffset = String::kSize;
+  static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
   static const int kSize = kResourceOffset + kPointerSize;
 
  private:
@@ -4148,7 +4184,7 @@
   static const int kConstructorOffset = TemplateInfo::kHeaderSize;
   static const int kInternalFieldCountOffset =
       kConstructorOffset + kPointerSize;
-  static const int kSize = kInternalFieldCountOffset + kHeaderSize;
+  static const int kSize = kInternalFieldCountOffset + kPointerSize;
 };
 
 
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
index e9f7d3e..c4595b4 100644
--- a/src/oprofile-agent.cc
+++ b/src/oprofile-agent.cc
@@ -29,7 +29,8 @@
 
 #include "oprofile-agent.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef ENABLE_OPROFILE_AGENT
 op_agent_t OProfileAgent::handle_ = NULL;
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
index 75cfe18..4c299bf 100644
--- a/src/oprofile-agent.h
+++ b/src/oprofile-agent.h
@@ -39,7 +39,8 @@
 #include <opagent.h>  // NOLINT
 #endif
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class OProfileAgent {
  public:
diff --git a/src/parser.cc b/src/parser.cc
index ca7a284..271c3fd 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -37,7 +37,8 @@
 #include "scopes.h"
 #include "string-stream.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class ParserFactory;
 class ParserLog;
diff --git a/src/parser.h b/src/parser.h
index 4c1401c..c029c4b 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -31,7 +31,8 @@
 #include "scanner.h"
 #include "allocation.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class ParserMessage : public Malloced {
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 82208f1..acef74c 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -55,7 +55,8 @@
 #include "platform.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // 0 is never a valid thread id on FreeBSD since tids and pids share a
 // name space and pid 0 is used to kill the group (see man 2 kill).
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 6e1faf8..1b07f4d 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -58,7 +58,8 @@
 #include "platform.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // 0 is never a valid thread id on Linux since tids and pids share a
 // name space and pid 0 is reserved (see man 2 kill).
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 9dddfd3..3e0e284 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -58,7 +58,8 @@
 
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // 0 is never a valid thread id on MacOSX since a ptread_t is
 // a pointer.
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 42583f1..60ae76d 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -37,7 +37,8 @@
 #include "platform.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Give V8 the opportunity to override the default ceil behaviour.
 double ceiling(double x) {
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index de16ef5..d628a51 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -47,7 +47,8 @@
 #include "platform.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // ----------------------------------------------------------------------------
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 6c4e67a..1b0f9b2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -58,6 +58,12 @@
 
 #include <time.h>  // For LocalOffset() implementation.
 #include <mmsystem.h>  // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is for MinGW
+// header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif  // __MINGW32__
 #ifndef __MINGW32__
 #include <dbghelp.h>  // For SymLoadModule64 and al.
 #endif  // __MINGW32__
@@ -210,7 +216,8 @@
 }
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 double ceiling(double x) {
   return ceil(x);
diff --git a/src/platform.h b/src/platform.h
index e23abfc..f7fdafe 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -60,7 +60,8 @@
 
 #define INFINITY HUGE_VAL
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 int isfinite(double x);
 } }
 int isnan(double x);
@@ -105,7 +106,8 @@
 
 #endif  // __GNUC__
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 double ceiling(double x);
 
@@ -518,10 +520,11 @@
   // Is the sampler used for profiling.
   inline bool IsProfiling() { return profiling_; }
 
-  class PlatformData;
- protected:
+  // Whether the sampler is running (that is, consumes resources).
   inline bool IsActive() { return active_; }
 
+  class PlatformData;
+
  private:
   int interval_;
   bool profiling_;
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index b58000a..641f754 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -33,7 +33,8 @@
 #include "scopes.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef DEBUG
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 720fe7b..bfce9b0 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -30,7 +30,8 @@
 
 #include "ast.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef DEBUG
 
diff --git a/src/property.cc b/src/property.cc
index 6c21530..2915c4a 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -27,7 +27,8 @@
 
 #include "v8.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 void DescriptorWriter::Write(Descriptor* desc) {
diff --git a/src/property.h b/src/property.h
index 60a9b54..edab97a 100644
--- a/src/property.h
+++ b/src/property.h
@@ -28,7 +28,8 @@
 #ifndef V8_PROPERTY_H_
 #define V8_PROPERTY_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Abstraction for elements in instance-descriptor arrays.
diff --git a/src/regexp-macro-assembler-irregexp-inl.h b/src/regexp-macro-assembler-irregexp-inl.h
index fa4c3d1..5074f21 100644
--- a/src/regexp-macro-assembler-irregexp-inl.h
+++ b/src/regexp-macro-assembler-irregexp-inl.h
@@ -35,7 +35,8 @@
 #ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
 #define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index 436db35..b87c51f 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -33,7 +33,8 @@
 #include "regexp-macro-assembler-irregexp-inl.h"
 
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 9ed82e3..597046c 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -28,7 +28,8 @@
 #ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
 #define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 74345d8..30eb485 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -30,7 +30,8 @@
 #include "regexp-macro-assembler.h"
 #include "regexp-macro-assembler-tracer.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
     RegExpMacroAssembler* assembler) :
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index f25289e..0fd73f3 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -28,7 +28,8 @@
 #ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
 #define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Decorator on a RegExpMacroAssembler that write all calls.
 class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 913ac64..8dede30 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -31,7 +31,8 @@
 #include "assembler.h"
 #include "regexp-macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 RegExpMacroAssembler::RegExpMacroAssembler() {
 }
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index a3f398d..4849864 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -28,7 +28,8 @@
 #ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
 #define V8_REGEXP_MACRO_ASSEMBLER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 struct DisjunctDecisionRow {
   RegExpCharacterClass cc;
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index 05daa58..83cb6e4 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -29,7 +29,8 @@
 #include "top.h"
 #include "regexp-stack.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 RegExpStack::RegExpStack() {
   // Initialize, if not already initialized.
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index b955e76..6c090da 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -28,7 +28,8 @@
 #ifndef V8_REGEXP_STACK_H_
 #define V8_REGEXP_STACK_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Maintains a per-v8thread stack area that can be used by irregexp
 // implementation for its backtracking stack.
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
index 800d530..a94f24e 100644
--- a/src/register-allocator-inl.h
+++ b/src/register-allocator-inl.h
@@ -32,7 +32,8 @@
 #include "register-allocator.h"
 #include "virtual-frame.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Result::~Result() {
   if (is_register()) {
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index d6f850e..a3939cb 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -30,7 +30,8 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Result implementation.
diff --git a/src/register-allocator.h b/src/register-allocator.h
index 25bd026..76f4317 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -30,7 +30,8 @@
 
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // -------------------------------------------------------------------------
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 983792d..e0a0226 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -32,7 +32,8 @@
 #include "scopes.h"
 #include "rewriter.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class AstOptimizer: public AstVisitor {
diff --git a/src/rewriter.h b/src/rewriter.h
index aa2f981..8943e75 100644
--- a/src/rewriter.h
+++ b/src/rewriter.h
@@ -28,7 +28,8 @@
 #ifndef V8_REWRITER_H_
 #define V8_REWRITER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Currently, the rewriter takes function literals (only top-level)
diff --git a/src/runtime.cc b/src/runtime.cc
index f5b366f..1f67c4d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -46,7 +46,8 @@
 #include "smart-pointer.h"
 #include "parser.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 #define RUNTIME_ASSERT(value) do {                                   \
@@ -1420,6 +1421,7 @@
 
   void AddElement(Object* element) {
     ASSERT(element->IsSmi() || element->IsString());
+    ASSERT(parts_->length() > part_count_);
     parts_->set(part_count_, element);
     part_count_++;
   }
@@ -1589,6 +1591,7 @@
             if (i > last) {
               parts->Add(ReplacementPart::ReplacementSubString(last, i));
             }
+            ASSERT(capture_ref <= capture_count);
             parts->Add(ReplacementPart::SubjectCapture(capture_ref));
             last = next_index + 1;
           }
@@ -2035,7 +2038,7 @@
   BoyerMoorePopulateGoodSuffixTable(pattern, start);
   pchar last_char = pattern[m - 1];
   // Continue search from i.
-  do {
+  while (idx <= n - m) {
     int j = m - 1;
     schar c;
     while (last_char != (c = subject[idx + j])) {
@@ -2061,7 +2064,7 @@
       }
       idx += shift;
     }
-  } while (idx <= n - m);
+  }
 
   return -1;
 }
@@ -4316,9 +4319,15 @@
   }
 #endif
 
-  // Compile the target function.
+  // Compile the target function.  Here we compile using CompileLazyInLoop in
+  // order to get the optimized version.  This helps code like delta-blue
+  // that calls performance-critical routines through constructors.  A
+  // constructor call doesn't use a CallIC, it uses a LoadIC followed by a
+  // direct call.  Since the in-loop tracking takes place through CallICs
+  // this means that things called through constructors are never known to
+  // be in loops.  We compile them as if they are in loops here just in case.
   ASSERT(!function->is_compiled());
-  if (!CompileLazy(function, KEEP_EXCEPTION)) {
+  if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -4896,16 +4905,14 @@
 
 static Object* Runtime_CompileString(Arguments args) {
   HandleScope scope;
-  ASSERT_EQ(3, args.length());
+  ASSERT_EQ(2, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
-  CONVERT_ARG_CHECKED(Smi, line_offset, 1);
-  CONVERT_ARG_CHECKED(Oddball, is_json, 2)
+  CONVERT_ARG_CHECKED(Oddball, is_json, 1)
 
   // Compile source string in the global context.
   Handle<Context> context(Top::context()->global_context());
   Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
                                                          context,
-                                                         line_offset->value(),
                                                          true,
                                                          is_json->IsTrue());
   if (boilerplate.is_null()) return Failure::Exception();
@@ -4932,7 +4939,7 @@
 
   // Compile source string in the current context.
   Handle<JSFunction> boilerplate =
-      Compiler::CompileEval(source, context, 0, is_global, false);
+      Compiler::CompileEval(source, context, is_global, false);
   if (boilerplate.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
     Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -5436,7 +5443,7 @@
 
 // Helper functions for wrapping and unwrapping stack frame ids.
 static Smi* WrapFrameId(StackFrame::Id id) {
-  ASSERT(IsAligned(OffsetFrom(id), 4));
+  ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
   return Smi::FromInt(id >> 2);
 }
 
@@ -5485,7 +5492,8 @@
 }
 
 
-static Object* DebugLookupResultValue(Object* receiver, LookupResult* result,
+static Object* DebugLookupResultValue(Object* receiver, String* name,
+                                      LookupResult* result,
                                       bool* caught_exception) {
   Object* value;
   switch (result->type()) {
@@ -5510,11 +5518,18 @@
       return result->GetConstantFunction();
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
-      if (structure->IsProxy()) {
-        AccessorDescriptor* callback =
-            reinterpret_cast<AccessorDescriptor*>(
-                Proxy::cast(structure)->proxy());
-        value = (callback->getter)(receiver, callback->data);
+      if (structure->IsProxy() || structure->IsAccessorInfo()) {
+        if (Debug::debugger_entry()) {
+          // SaveContext scope. It will restore debugger context after the
+          // getter execution.
+          SaveContext save;
+          Top::set_context(*Debug::debugger_entry()->GetContext());
+          value = receiver->GetPropertyWithCallback(
+              receiver, structure, name, result->holder());
+        } else {
+          value = receiver->GetPropertyWithCallback(
+              receiver, structure, name, result->holder());
+        }
         if (value->IsException()) {
           value = Top::pending_exception();
           Top::clear_pending_exception();
@@ -5595,7 +5610,7 @@
 
   if (result.IsProperty()) {
     bool caught_exception = false;
-    Object* value = DebugLookupResultValue(*obj, &result,
+    Object* value = DebugLookupResultValue(*obj, *name, &result,
                                            &caught_exception);
     if (value->IsFailure()) return value;
     Handle<Object> value_handle(value);
@@ -5631,7 +5646,7 @@
   LookupResult result;
   obj->Lookup(*name, &result);
   if (result.IsProperty()) {
-    return DebugLookupResultValue(*obj, &result, NULL);
+    return DebugLookupResultValue(*obj, *name, &result, NULL);
   }
   return Heap::undefined_value();
 }
@@ -6565,7 +6580,6 @@
   Handle<JSFunction> boilerplate =
       Compiler::CompileEval(function_source,
                             context,
-                            0,
                             context->IsGlobalContext(),
                             false);
   if (boilerplate.is_null()) return Failure::Exception();
@@ -6627,7 +6641,6 @@
   Handle<JSFunction> boilerplate =
       Handle<JSFunction>(Compiler::CompileEval(source,
                                                context,
-                                               0,
                                                true,
                                                false));
   if (boilerplate.is_null()) return Failure::Exception();
@@ -6646,67 +6659,15 @@
 }
 
 
-// If an object given is an external string, check that the underlying
-// resource is accessible. For other kinds of objects, always return true.
-static bool IsExternalStringValid(Object* str) {
-  if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
-    return true;
-  }
-  if (String::cast(str)->IsAsciiRepresentation()) {
-    return ExternalAsciiString::cast(str)->resource() != NULL;
-  } else if (String::cast(str)->IsTwoByteRepresentation()) {
-    return ExternalTwoByteString::cast(str)->resource() != NULL;
-  } else {
-    return true;
-  }
-}
-
-
-// Helper function used by Runtime_DebugGetLoadedScripts below.
-static int DebugGetLoadedScripts(FixedArray* instances, int instances_size) {
-  NoHandleAllocation ha;
-  AssertNoAllocation no_alloc;
-
-  // Scan heap for Script objects.
-  int count = 0;
-  HeapIterator iterator;
-  while (iterator.has_next()) {
-    HeapObject* obj = iterator.next();
-    ASSERT(obj != NULL);
-    if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
-      if (instances != NULL && count < instances_size) {
-        instances->set(count, obj);
-      }
-      count++;
-    }
-  }
-
-  return count;
-}
-
-
 static Object* Runtime_DebugGetLoadedScripts(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 0);
 
-  // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
-  // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which is no longer referenced.
-  Heap::CollectAllGarbage();
-  Heap::CollectAllGarbage();
-
-  // Get the number of scripts.
-  int count;
-  count = DebugGetLoadedScripts(NULL, 0);
-
-  // Allocate an array to hold the result.
-  Handle<FixedArray> instances = Factory::NewFixedArray(count);
-
   // Fill the script objects.
-  count = DebugGetLoadedScripts(*instances, count);
+  Handle<FixedArray> instances = Debug::GetLoadedScripts();
 
   // Convert the script objects to proper JS objects.
-  for (int i = 0; i < count; i++) {
+  for (int i = 0; i < instances->length(); i++) {
     Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
     // Get the script wrapper in a local handle before calling GetScriptWrapper,
     // because using
diff --git a/src/runtime.h b/src/runtime.h
index 79661dc..474f1b3 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -28,7 +28,8 @@
 #ifndef V8_RUNTIME_H_
 #define V8_RUNTIME_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // The interface to C++ runtime functions.
 
@@ -200,7 +201,7 @@
   F(NumberIsFinite, 1) \
   \
   /* Globals */ \
-  F(CompileString, 3) \
+  F(CompileString, 2) \
   F(GlobalPrint, 1) \
   \
   /* Eval */ \
diff --git a/src/scanner.cc b/src/scanner.cc
index aab3ca3..24a6d4b 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -30,7 +30,8 @@
 #include "ast.h"
 #include "scanner.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // Character predicates
diff --git a/src/scanner.h b/src/scanner.h
index 6fb5afd..eea23a7 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -31,7 +31,8 @@
 #include "token.h"
 #include "char-predicates-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class UTF8Buffer {
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 6d2ade8..fedfbd6 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -32,7 +32,8 @@
 #include "scopeinfo.h"
 #include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 static int CompareLocal(Variable* const* v, Variable* const* w) {
@@ -566,5 +567,6 @@
 // Make sure the classes get instantiated by the template system.
 template class ScopeInfo<FreeStoreAllocationPolicy>;
 template class ScopeInfo<PreallocatedStorage>;
+template class ScopeInfo<ZoneListAllocationPolicy>;
 
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index dbe235a..a097d34 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -30,7 +30,8 @@
 
 #include "variables.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Scope information represents information about a functions's
 // scopes (currently only one, because we don't do any inlining)
@@ -150,6 +151,18 @@
   List<Variable::Mode, Allocator > context_modes_;
 };
 
+class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
+ public:
+  // Create a ZoneScopeInfo instance from a scope.
+  explicit ZoneScopeInfo(Scope* scope)
+      : ScopeInfo<ZoneListAllocationPolicy>(scope) {}
+
+  // Create a ZoneScopeInfo instance from a Code object.
+  explicit ZoneScopeInfo(Code* code)
+      :  ScopeInfo<ZoneListAllocationPolicy>(code) {}
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_SCOPEINFO_H_
diff --git a/src/scopes.cc b/src/scopes.cc
index 7717eaa..7122eb0 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -31,7 +31,8 @@
 #include "scopeinfo.h"
 #include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // A Zone allocator for use with LocalsMap.
@@ -301,6 +302,8 @@
     List<Variable*, FreeStoreAllocationPolicy>* locals);
 template void Scope::CollectUsedVariables(
     List<Variable*, PreallocatedStorage>* locals);
+template void Scope::CollectUsedVariables(
+    List<Variable*, ZoneListAllocationPolicy>* locals);
 
 
 void Scope::AllocateVariables(Handle<Context> context) {
diff --git a/src/scopes.h b/src/scopes.h
index 129acc6..b2f61ef 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -31,7 +31,8 @@
 #include "ast.h"
 #include "hashmap.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // A hash map to support fast local variable declaration and lookup.
diff --git a/src/serialize.cc b/src/serialize.cc
index 62287bc..fb66d27 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -39,7 +39,8 @@
 #include "stub-cache.h"
 #include "v8threads.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Encoding: a RelativeAddress must be able to fit in a pointer:
 // it is encoded as an Address with (from MS to LS bits):
diff --git a/src/serialize.h b/src/serialize.h
index f6594ac..7f4eb63 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -30,7 +30,8 @@
 
 #include "hashmap.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // A TypeCode is used to distinguish different kinds of external reference.
 // It is a single bit to make testing for types easy.
diff --git a/src/shell.h b/src/shell.h
index 6712451..ca51040 100644
--- a/src/shell.h
+++ b/src/shell.h
@@ -32,7 +32,8 @@
 
 #include "../public/debug.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Debug event handler for interactive debugging.
 void handle_debug_event(v8::DebugEvent event,
diff --git a/src/smart-pointer.h b/src/smart-pointer.h
index c39df16..0fa8224 100644
--- a/src/smart-pointer.h
+++ b/src/smart-pointer.h
@@ -28,7 +28,8 @@
 #ifndef V8_SMART_POINTER_H_
 #define V8_SMART_POINTER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // A 'scoped array pointer' that calls DeleteArray on its pointer when the
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 3aa1cae..9c66a50 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -33,7 +33,8 @@
 #include "serialize.h"
 #include "snapshot.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 bool Snapshot::Deserialize(const byte* content, int len) {
   Deserializer des(content, len);
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
index d4cda19..60ab1e5 100644
--- a/src/snapshot-empty.cc
+++ b/src/snapshot-empty.cc
@@ -31,7 +31,8 @@
 
 #include "snapshot.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 const byte Snapshot::data_[] = { 0 };
 int Snapshot::size_ = 0;
diff --git a/src/snapshot.h b/src/snapshot.h
index b3f23d3..88ba8db 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -28,7 +28,8 @@
 #ifndef V8_SNAPSHOT_H_
 #define V8_SNAPSHOT_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class Snapshot {
  public:
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 3973658..2f01164 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -31,7 +31,8 @@
 #include "memory.h"
 #include "spaces.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // -----------------------------------------------------------------------------
@@ -92,8 +93,10 @@
 
 
 void Page::ClearRSet() {
+#ifndef V8_HOST_ARCH_64_BIT
   // This method can be called in all rset states.
   memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
+#endif
 }
 
 
@@ -157,9 +160,14 @@
 
 
 bool Page::IsRSetSet(Address address, int offset) {
+#ifdef V8_HOST_ARCH_64_BIT
+  // TODO(X64): Reenable when RSet works.
+  return true;
+#else  // V8_HOST_ARCH_64_BIT
   uint32_t bitmask = 0;
   Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
   return (Memory::uint32_at(rset_address) & bitmask) != 0;
+#endif  // V8_HOST_ARCH_64_BIT
 }
 
 
@@ -194,7 +202,7 @@
 
 Page* MemoryAllocator::GetNextPage(Page* p) {
   ASSERT(p->is_valid());
-  int raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
+  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
   return Page::FromAddress(AddressFrom<Address>(raw_addr));
 }
 
@@ -207,7 +215,7 @@
 
 void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
   ASSERT(prev->is_valid());
-  int chunk_id = prev->opaque_header & Page::kPageAlignmentMask;
+  int chunk_id = GetChunkId(prev);
   ASSERT_PAGE_ALIGNED(next->address());
   prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
 }
diff --git a/src/spaces.cc b/src/spaces.cc
index ee801cb..e61c6ad 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -31,7 +31,8 @@
 #include "mark-compact.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
@@ -2422,6 +2423,13 @@
 void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
   ASSERT(Page::is_rset_in_use());
 
+  static void* lo_rset_histogram = StatsTable::CreateHistogram(
+      "V8.RSetLO",
+      0,
+      // Keeping this histogram's buckets the same as the paged space histogram.
+      Page::kObjectAreaSize / kPointerSize,
+      30);
+
   LargeObjectIterator it(this);
   while (it.has_next()) {
     // We only have code, sequential strings, or fixed arrays in large
@@ -2432,15 +2440,18 @@
       // Iterate the normal page remembered set range.
       Page* page = Page::FromAddress(object->address());
       Address object_end = object->address() + object->Size();
-      Heap::IterateRSetRange(page->ObjectAreaStart(),
-                             Min(page->ObjectAreaEnd(), object_end),
-                             page->RSetStart(),
-                             copy_object_func);
+      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
+                                         Min(page->ObjectAreaEnd(), object_end),
+                                         page->RSetStart(),
+                                         copy_object_func);
 
       // Iterate the extra array elements.
       if (object_end > page->ObjectAreaEnd()) {
-        Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
-                               object_end, copy_object_func);
+        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
+                                        object_end, copy_object_func);
+      }
+      if (lo_rset_histogram != NULL) {
+        StatsTable::AddHistogramSample(lo_rset_histogram, count);
       }
     }
   }
diff --git a/src/spaces.h b/src/spaces.h
index e8504a4..a62b0a8 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -31,7 +31,8 @@
 #include "list-inl.h"
 #include "log.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -----------------------------------------------------------------------------
 // Heap structures:
@@ -98,6 +99,7 @@
 // its page offset by 32. Therefore, the object area in a page starts at the
 // 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
 // the first two words (64 bits) in a page can be used for other purposes.
+// TODO(X64): This description only represents the 32-bit layout.
 //
 // The mark-compact collector transforms a map pointer into a page index and a
 // page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@@ -213,7 +215,7 @@
   static const int kPageSize = 1 << kPageSizeBits;
 
   // Page size mask.
-  static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
   // The end offset of the remembered set in a page
   // (heaps are aligned to pointer size).
@@ -242,7 +244,7 @@
   // in the current page.  If a page is in the large object space, the first
   // word *may* (if the page start and large object chunk start are the
   // same) contain the address of the next large object chunk.
-  int opaque_header;
+  intptr_t opaque_header;
 
   // If the page is not in the large object space, the low-order bit of the
   // second word is set. If the page is in the large object space, the
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 2e0912f..44ba297 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -30,7 +30,8 @@
 #include "factory.h"
 #include "string-stream.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 static const int kMentionedObjectCacheMaxSize = 256;
 static List<HeapObject*, PreallocatedStorage>* debug_object_cache = NULL;
diff --git a/src/string-stream.h b/src/string-stream.h
index fa20064..15a72e0 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -28,7 +28,8 @@
 #ifndef V8_STRING_STREAM_H_
 #define V8_STRING_STREAM_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class StringAllocator {
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 6811fd2..ca14d1a 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,7 +32,8 @@
 #include "ic-inl.h"
 #include "stub-cache.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -----------------------------------------------------------------------
 // StubCache implementation.
@@ -369,6 +370,7 @@
 
 
 Object* StubCache::ComputeCallConstant(int argc,
+                                       InLoopFlag in_loop,
                                        String* name,
                                        Object* object,
                                        JSObject* holder,
@@ -387,7 +389,10 @@
   }
 
   Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::CALL_IC, CONSTANT_FUNCTION, argc);
+      Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                    CONSTANT_FUNCTION,
+                                    in_loop,
+                                    argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     if (object->IsJSObject()) {
@@ -405,7 +410,7 @@
     if (!function->is_compiled()) return Failure::InternalError();
     // Compile the stub - only create stubs for fully compiled functions.
     CallStubCompiler compiler(argc);
-    code = compiler.CompileCallConstant(object, holder, function, check);
+    code = compiler.CompileCallConstant(object, holder, function, check, flags);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
@@ -416,6 +421,7 @@
 
 
 Object* StubCache::ComputeCallField(int argc,
+                                    InLoopFlag in_loop,
                                     String* name,
                                     Object* object,
                                     JSObject* holder,
@@ -430,11 +436,14 @@
     object = holder;
   }
 
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, FIELD, argc);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                                    FIELD,
+                                                    in_loop,
+                                                    argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(argc);
-    code = compiler.CompileCallField(object, holder, index, name);
+    code = compiler.CompileCallField(object, holder, index, name, flags);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
@@ -460,7 +469,10 @@
   }
 
   Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::CALL_IC, INTERCEPTOR, argc);
+      Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                    INTERCEPTOR,
+                                    NOT_IN_LOOP,
+                                    argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(argc);
@@ -475,9 +487,10 @@
 
 
 Object* StubCache::ComputeCallNormal(int argc,
+                                     InLoopFlag in_loop,
                                      String* name,
                                      JSObject* receiver) {
-  Object* code = ComputeCallNormal(argc);
+  Object* code = ComputeCallNormal(argc, in_loop);
   if (code->IsFailure()) return code;
   return Set(name, receiver->map(), Code::cast(code));
 }
@@ -522,9 +535,9 @@
 }
 
 
-Code* StubCache::FindCallInitialize(int argc) {
+Code* StubCache::FindCallInitialize(int argc, InLoopFlag in_loop) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
   Object* result = ProbeCache(flags);
   ASSERT(!result->IsUndefined());
   // This might be called during the marking phase of the collector
@@ -533,9 +546,9 @@
 }
 
 
-Object* StubCache::ComputeCallInitialize(int argc) {
+Object* StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, in_loop, UNINITIALIZED, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -543,20 +556,9 @@
 }
 
 
-Object* StubCache::ComputeCallInitializeInLoop(int argc) {
+Object* StubCache::ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, UNINITIALIZED_IN_LOOP, NORMAL, argc);
-  Object* probe = ProbeCache(flags);
-  if (!probe->IsUndefined()) return probe;
-  StubCompiler compiler;
-  return FillCache(compiler.CompileCallInitialize(flags));
-}
-
-
-
-Object* StubCache::ComputeCallPreMonomorphic(int argc) {
-  Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, PREMONOMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, in_loop, PREMONOMORPHIC, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -564,9 +566,9 @@
 }
 
 
-Object* StubCache::ComputeCallNormal(int argc) {
+Object* StubCache::ComputeCallNormal(int argc, InLoopFlag in_loop) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, in_loop, MONOMORPHIC, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -574,9 +576,9 @@
 }
 
 
-Object* StubCache::ComputeCallMegamorphic(int argc) {
+Object* StubCache::ComputeCallMegamorphic(int argc, InLoopFlag in_loop) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, MEGAMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, in_loop, MEGAMORPHIC, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -586,7 +588,7 @@
 
 Object* StubCache::ComputeCallMiss(int argc) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::STUB, MEGAMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, MEGAMORPHIC, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -597,7 +599,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Object* StubCache::ComputeCallDebugBreak(int argc) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, DEBUG_BREAK, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -607,7 +609,11 @@
 
 Object* StubCache::ComputeCallDebugPrepareStepIn(int argc) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, DEBUG_PREPARE_STEP_IN, NORMAL, argc);
+      Code::ComputeFlags(Code::CALL_IC,
+                         NOT_IN_LOOP,
+                         DEBUG_PREPARE_STEP_IN,
+                         NORMAL,
+                         argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -618,7 +624,7 @@
 
 Object* StubCache::ComputeLazyCompile(int argc) {
   Code::Flags flags =
-      Code::ComputeFlags(Code::STUB, UNINITIALIZED, NORMAL, argc);
+      Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, UNINITIALIZED, NORMAL, argc);
   Object* probe = ProbeCache(flags);
   if (!probe->IsUndefined()) return probe;
   StubCompiler compiler;
@@ -917,7 +923,10 @@
 
 Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
   int argc = arguments_.immediate();
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, type, argc);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+                                                    type,
+                                                    NOT_IN_LOOP,
+                                                    argc);
   return GetCodeWithFlags(flags, name);
 }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 369b15d..ed513a0 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -30,7 +30,8 @@
 
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // The stub cache is used for megamorphic calls and property accesses.
@@ -127,18 +128,23 @@
   // ---
 
   static Object* ComputeCallField(int argc,
+                                  InLoopFlag in_loop,
                                   String* name,
                                   Object* object,
                                   JSObject* holder,
                                   int index);
 
   static Object* ComputeCallConstant(int argc,
+                                     InLoopFlag in_loop,
                                      String* name,
                                      Object* object,
                                      JSObject* holder,
                                      JSFunction* function);
 
-  static Object* ComputeCallNormal(int argc, String* name, JSObject* receiver);
+  static Object* ComputeCallNormal(int argc,
+                                   InLoopFlag in_loop,
+                                   String* name,
+                                   JSObject* receiver);
 
   static Object* ComputeCallInterceptor(int argc,
                                         String* name,
@@ -147,15 +153,14 @@
 
   // ---
 
-  static Object* ComputeCallInitialize(int argc);
-  static Object* ComputeCallInitializeInLoop(int argc);
-  static Object* ComputeCallPreMonomorphic(int argc);
-  static Object* ComputeCallNormal(int argc);
-  static Object* ComputeCallMegamorphic(int argc);
+  static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallPreMonomorphic(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallNormal(int argc, InLoopFlag in_loop);
+  static Object* ComputeCallMegamorphic(int argc, InLoopFlag in_loop);
   static Object* ComputeCallMiss(int argc);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  static Code* FindCallInitialize(int argc);
+  static Code* FindCallInitialize(int argc, InLoopFlag in_loop);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Object* ComputeCallDebugBreak(int argc);
@@ -208,8 +213,12 @@
     // 4Gb (and not at all if it isn't).
     uint32_t map_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     // Base the offset on a simple combination of name, flags, and map.
-    uint32_t key = (map_low32bits + field) ^ flags;
+    uint32_t key = (map_low32bits + field) ^ iflags;
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
@@ -217,7 +226,11 @@
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    uint32_t key = seed - string_low32bits + flags;
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask);
+    uint32_t key = seed - string_low32bits + iflags;
     return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
   }
 
@@ -468,11 +481,13 @@
   Object* CompileCallField(Object* object,
                            JSObject* holder,
                            int index,
-                           String* name);
+                           String* name,
+                           Code::Flags flags);
   Object* CompileCallConstant(Object* object,
                               JSObject* holder,
                               JSFunction* function,
-                              CheckType check);
+                              CheckType check,
+                              Code::Flags flags);
   Object* CompileCallInterceptor(Object* object,
                                  JSObject* holder,
                                  String* name);
diff --git a/src/token.cc b/src/token.cc
index 3f92707..bb42cea 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -29,7 +29,8 @@
 
 #include "token.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #ifdef DEBUG
 #define T(name, string, precedence) #name,
diff --git a/src/token.h b/src/token.h
index 9ca2476..4d4df63 100644
--- a/src/token.h
+++ b/src/token.h
@@ -28,7 +28,8 @@
 #ifndef V8_TOKEN_H_
 #define V8_TOKEN_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // TOKEN_LIST takes a list of 3 macros M, all of which satisfy the
 // same signature M(name, string, precedence), where name is the
diff --git a/src/top.cc b/src/top.cc
index b5a0b94..b2583db 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -34,7 +34,8 @@
 #include "string-stream.h"
 #include "platform.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 ThreadLocalTop Top::thread_local_;
 Mutex* Top::break_access_ = OS::CreateMutex();
diff --git a/src/top.h b/src/top.h
index e0cfdc3..8e928ed 100644
--- a/src/top.h
+++ b/src/top.h
@@ -30,7 +30,8 @@
 
 #include "frames-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 #define RETURN_IF_SCHEDULED_EXCEPTION() \
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
index b7a1934..36464fa 100644
--- a/src/usage-analyzer.cc
+++ b/src/usage-analyzer.cc
@@ -31,7 +31,8 @@
 #include "scopes.h"
 #include "usage-analyzer.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Weight boundaries
 static const int MinWeight = 1;
diff --git a/src/usage-analyzer.h b/src/usage-analyzer.h
index 2369422..1b0ea4a 100644
--- a/src/usage-analyzer.h
+++ b/src/usage-analyzer.h
@@ -28,7 +28,8 @@
 #ifndef V8_USAGE_ANALYZER_H_
 #define V8_USAGE_ANALYZER_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Compute usage counts for all variables.
 // Used for variable allocation.
diff --git a/src/utils.cc b/src/utils.cc
index 3920320..d56d279 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -33,7 +33,8 @@
 
 #include "sys/stat.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
@@ -86,6 +87,20 @@
 }
 
 
+// Thomas Wang, Integer Hash Functions.
+// http://www.concentric.net/~Ttwang/tech/inthash.htm
+uint32_t ComputeIntegerHash(uint32_t key) {
+  uint32_t hash = key;
+  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
+  hash = hash ^ (hash >> 12);
+  hash = hash + (hash << 2);
+  hash = hash ^ (hash >> 4);
+  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
+  hash = hash ^ (hash >> 16);
+  return hash;
+}
+
+
 void PrintF(const char* format, ...) {
   va_list arguments;
   va_start(arguments, format);
diff --git a/src/utils.h b/src/utils.h
index dea7d1f..36a929c 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -30,7 +30,8 @@
 
 #include <stdlib.h>
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // General helper functions
@@ -54,7 +55,7 @@
 // This allows conversion of Addresses and integral types into
 // 0-relative int offsets.
 template <typename T>
-static inline int OffsetFrom(T x) {
+static inline intptr_t OffsetFrom(T x) {
   return x - static_cast<T>(0);
 }
 
@@ -63,7 +64,7 @@
 // This allows conversion of 0-relative int offsets into Addresses and
 // integral types.
 template <typename T>
-static inline T AddressFrom(int x) {
+static inline T AddressFrom(intptr_t x) {
   return static_cast<T>(0) + x;
 }
 
@@ -205,6 +206,12 @@
 
 
 // ----------------------------------------------------------------------------
+// Hash function.
+
+uint32_t ComputeIntegerHash(uint32_t key);
+
+
+// ----------------------------------------------------------------------------
 // I/O support.
 
 // Our version of printf(). Avoids compilation errors that we get
diff --git a/src/v8-counters.cc b/src/v8-counters.cc
index 3a8286a..de2ce66 100644
--- a/src/v8-counters.cc
+++ b/src/v8-counters.cc
@@ -29,7 +29,8 @@
 
 #include "v8-counters.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define HT(name, caption) \
   HistogramTimer Counters::name = { #caption, NULL, false, 0, 0 }; \
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 589b887..4111312 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -30,7 +30,8 @@
 
 #include "counters.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 #define HISTOGRAM_TIMER_LIST(HT)                                      \
   /* Garbage collection timers. */                                    \
diff --git a/src/v8.cc b/src/v8.cc
index c0124e4..17cb2df 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -33,16 +33,23 @@
 #include "stub-cache.h"
 #include "oprofile-agent.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
+bool V8::is_running_ = false;
 bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
+bool V8::has_fatal_error_ = false;
 
 bool V8::Initialize(Deserializer *des) {
   bool create_heap_objects = des == NULL;
-  if (HasBeenDisposed()) return false;
-  if (HasBeenSetup()) return true;
+  if (has_been_disposed_ || has_fatal_error_) return false;
+  if (IsRunning()) return true;
+
+  is_running_ = true;
   has_been_setup_ = true;
+  has_fatal_error_ = false;
+  has_been_disposed_ = false;
 #ifdef DEBUG
   // The initialization process does not handle memory exhaustion.
   DisallowAllocationFailure disallow_allocation_failure;
@@ -58,7 +65,7 @@
   // Setup the object heap
   ASSERT(!Heap::HasBeenSetup());
   if (!Heap::Setup(create_heap_objects)) {
-    has_been_setup_ = false;
+    SetFatalError();
     return false;
   }
 
@@ -94,9 +101,14 @@
 }
 
 
+void V8::SetFatalError() {
+  is_running_ = false;
+  has_fatal_error_ = true;
+}
+
+
 void V8::TearDown() {
-  if (HasBeenDisposed()) return;
-  if (!HasBeenSetup()) return;
+  if (!has_been_setup_ || has_been_disposed_) return;
 
   OProfileAgent::TearDown();
 
@@ -113,8 +125,9 @@
   Heap::TearDown();
   Logger::TearDown();
 
-  has_been_setup_ = false;
+  is_running_ = false;
   has_been_disposed_ = true;
 }
 
+
 } }  // namespace v8::internal
diff --git a/src/v8.h b/src/v8.h
index 4ced0d2..8cb3c7d 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -73,7 +73,8 @@
 #include "heap-inl.h"
 #include "messages.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class V8 : public AllStatic {
  public:
@@ -85,13 +86,23 @@
   // deserialized data into an empty heap.
   static bool Initialize(Deserializer* des);
   static void TearDown();
-  static bool HasBeenSetup() { return has_been_setup_; }
-  static bool HasBeenDisposed() { return has_been_disposed_; }
+  static bool IsRunning() { return is_running_; }
+  // To be dead you have to have lived
+  static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
+  static void SetFatalError();
 
   // Report process out of memory. Implementation found in api.cc.
   static void FatalProcessOutOfMemory(const char* location);
  private:
+  // True if engine is currently running
+  static bool is_running_;
+  // True if V8 has ever been run
   static bool has_been_setup_;
+  // True if error has been signaled for current engine
+  // (reset to false if engine is restarted)
+  static bool has_fatal_error_;
+  // True if engine has been shut down
+  // (reset if engine is restarted)
   static bool has_been_disposed_;
 };
 
diff --git a/src/v8natives.js b/src/v8natives.js
index 55bc9f8..2edb191 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -120,7 +120,7 @@
                          'be the global object from which eval originated');
   }
   
-  var f = %CompileString(x, 0, false);
+  var f = %CompileString(x, false);
   if (!IS_FUNCTION(f)) return f;
 
   return f.call(this);
@@ -131,7 +131,7 @@
 function GlobalExecScript(expr, lang) {
   // NOTE: We don't care about the character casing.
   if (!lang || /javascript/i.test(lang)) {
-    var f = %CompileString(ToString(expr), 0, false);
+    var f = %CompileString(ToString(expr), false);
     f.call(%GlobalReceiver(global));
   }
   return null;
@@ -550,7 +550,7 @@
 
   // The call to SetNewFunctionAttributes will ensure the prototype
   // property of the resulting function is enumerable (ECMA262, 15.3.5.2).
-  var f = %CompileString(source, -1, false)();
+  var f = %CompileString(source, false)();
   %FunctionSetName(f, "anonymous");
   return %SetNewFunctionAttributes(f);
 }
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 838cae7..c5fc9fa 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -261,6 +261,8 @@
   ThreadState* state = lazily_archived_thread_state_;
   state->LinkInto(ThreadState::IN_USE_LIST);
   char* to = state->data();
+  // Ensure that data containing GC roots are archived first, and handle them
+  // in ThreadManager::Iterate(ObjectVisitor*).
   to = HandleScopeImplementer::ArchiveThread(to);
   to = Top::ArchiveThread(to);
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/v8threads.h b/src/v8threads.h
index b651fc3..83f69f0 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -28,7 +28,8 @@
 #ifndef V8_V8THREADS_H_
 #define V8_V8THREADS_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 class ThreadState {
diff --git a/src/variables.cc b/src/variables.cc
index 51eb8ca..6c9f82f 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -31,7 +31,8 @@
 #include "scopes.h"
 #include "variables.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // ----------------------------------------------------------------------------
 // Implementation UseCount.
diff --git a/src/variables.h b/src/variables.h
index 275f498..5062071 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -30,7 +30,8 @@
 
 #include "zone.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class UseCount BASE_EMBEDDED {
  public:
diff --git a/src/version.cc b/src/version.cc
index 37eb083..8d35d1b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      4
-#define PATCH_LEVEL       3
+#define BUILD_NUMBER      5
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
@@ -43,7 +43,8 @@
 // number. This define is mainly used by the SCons build script.
 #define SONAME            ""
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 int Version::major_ = MAJOR_VERSION;
 int Version::minor_ = MINOR_VERSION;
diff --git a/src/version.h b/src/version.h
index 423b5f7..c322a2f 100644
--- a/src/version.h
+++ b/src/version.h
@@ -28,7 +28,8 @@
 #ifndef V8_VERSION_H_
 #define V8_VERSION_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 class Version {
  public:
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
index 9249cbf..f92f4a2 100644
--- a/src/virtual-frame.cc
+++ b/src/virtual-frame.cc
@@ -30,41 +30,21 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.
 
-VirtualFrame::SpilledScope::SpilledScope(CodeGenerator* cgen)
-    : cgen_(cgen),
-      previous_state_(cgen->in_spilled_code()) {
-  ASSERT(cgen->has_valid_frame());
-  cgen->frame()->SpillAll();
-  cgen->set_in_spilled_code(true);
-}
-
-
-VirtualFrame::SpilledScope::~SpilledScope() {
-  cgen_->set_in_spilled_code(previous_state_);
-}
-
-
 // When cloned, a frame is a deep copy of the original.
 VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : cgen_(original->cgen_),
-      masm_(original->masm_),
-      elements_(original->elements_.capacity()),
-      parameter_count_(original->parameter_count_),
-      local_count_(original->local_count_),
-      stack_pointer_(original->stack_pointer_),
-      frame_pointer_(original->frame_pointer_) {
-  // Copy all the elements from the original.
-  for (int i = 0; i < original->elements_.length(); i++) {
-    elements_.Add(original->elements_[i]);
-  }
-  for (int i = 0; i < kNumRegisters; i++) {
-    register_locations_[i] = original->register_locations_[i];
-  }
+    : elements_(original->elements_.length()),
+      stack_pointer_(original->stack_pointer_) {
+  elements_.AddAll(original->elements_);
+  // Copy register locations from original.
+  memcpy(&register_locations_,
+         original->register_locations_,
+         sizeof(register_locations_));
 }
 
 
@@ -125,19 +105,6 @@
 }
 
 
-// Modify the state of the virtual frame to match the actual frame by
-// removing elements from the top of the virtual frame.  The elements will
-// be externally popped from the actual frame (eg, by a runtime call).  No
-// code is emitted.
-void VirtualFrame::Forget(int count) {
-  ASSERT(count >= 0);
-  ASSERT(stack_pointer_ == elements_.length() - 1);
-
-  stack_pointer_ -= count;
-  ForgetElements(count);
-}
-
-
 void VirtualFrame::ForgetElements(int count) {
   ASSERT(count >= 0);
   ASSERT(elements_.length() >= count);
@@ -148,7 +115,7 @@
       // A hack to properly count register references for the code
       // generator's current frame and also for other frames.  The
       // same code appears in PrepareMergeTo.
-      if (cgen_->frame() == this) {
+      if (cgen()->frame() == this) {
         Unuse(last.reg());
       } else {
         register_locations_[last.reg().code()] = kIllegalIndex;
@@ -158,36 +125,15 @@
 }
 
 
-void VirtualFrame::Use(Register reg, int index) {
-  ASSERT(register_locations_[reg.code()] == kIllegalIndex);
-  register_locations_[reg.code()] = index;
-  cgen_->allocator()->Use(reg);
-}
-
-
-void VirtualFrame::Unuse(Register reg) {
-  ASSERT(register_locations_[reg.code()] != kIllegalIndex);
-  register_locations_[reg.code()] = kIllegalIndex;
-  cgen_->allocator()->Unuse(reg);
-}
-
-
-void VirtualFrame::Spill(Register target) {
-  if (is_used(target)) {
-    SpillElementAt(register_index(target));
-  }
-}
-
-
 // If there are any registers referenced only by the frame, spill one.
 Register VirtualFrame::SpillAnyRegister() {
   // Find the leftmost (ordered by register code) register whose only
   // reference is in the frame.
   for (int i = 0; i < kNumRegisters; i++) {
-    if (is_used(i) && cgen_->allocator()->count(i) == 1) {
+    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
       Register result = { i };
       Spill(result);
-      ASSERT(!cgen_->allocator()->is_used(result));
+      ASSERT(!cgen()->allocator()->is_used(result));
       return result;
     }
   }
@@ -251,7 +197,7 @@
         // If the frame is the code generator's current frame, we have
         // to decrement both the frame-internal and global register
         // counts.
-        if (cgen_->frame() == this) {
+        if (cgen()->frame() == this) {
           Unuse(source.reg());
         } else {
           register_locations_[source.reg().code()] = kIllegalIndex;
@@ -266,12 +212,6 @@
       ASSERT(source.is_valid());
       elements_[i].clear_sync();
     }
-
-    elements_[i].clear_copied();
-    if (elements_[i].is_copy()) {
-      elements_[elements_[i].index()].set_copied();
-    }
-
     // No code needs to be generated to change the static type of an
     // element.
     elements_[i].set_static_type(target.static_type());
@@ -307,11 +247,12 @@
 void VirtualFrame::PrepareForReturn() {
   // Spill all locals. This is necessary to make sure all locals have
   // the right value when breaking at the return site in the debugger.
-  //
-  // TODO(203): It is also necessary to ensure that merging at the
-  // return site does not generate code to overwrite eax, where the
-  // return value is kept in a non-refcounted register reference.
-  for (int i = 0; i < expression_base_index(); i++) SpillElementAt(i);
+  // Set their static type to unknown so that they will match the known
+  // return frame.
+  for (int i = 0; i < expression_base_index(); i++) {
+    SpillElementAt(i);
+    elements_[i].set_static_type(StaticType::unknown());
+  }
 }
 
 
@@ -384,14 +325,7 @@
 
 
 void VirtualFrame::PushFrameSlotAt(int index) {
-  FrameElement new_element = CopyElementAt(index);
-  elements_.Add(new_element);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  return RawCallStub(stub);
+  elements_.Add(CopyElementAt(index));
 }
 
 
@@ -419,17 +353,6 @@
 }
 
 
-void VirtualFrame::Push(Result* result) {
-  if (result->is_register()) {
-    Push(result->reg(), result->static_type());
-  } else {
-    ASSERT(result->is_constant());
-    Push(result->handle());
-  }
-  result->Unuse();
-}
-
-
 void VirtualFrame::Nip(int num_dropped) {
   ASSERT(num_dropped >= 0);
   if (num_dropped == 0) return;
@@ -443,14 +366,6 @@
 
 bool VirtualFrame::Equals(VirtualFrame* other) {
 #ifdef DEBUG
-  // These are sanity checks in debug builds, but we do not need to
-  // use them to distinguish frames at merge points.
-  if (cgen_ != other->cgen_) return false;
-  if (masm_ != other->masm_) return false;
-  if (parameter_count_ != other->parameter_count_) return false;
-  if (local_count_ != other->local_count_) return false;
-  if (frame_pointer_ != other->frame_pointer_) return false;
-
   for (int i = 0; i < kNumRegisters; i++) {
     if (register_locations_[i] != other->register_locations_[i]) {
       return false;
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index b617507..ff37f05 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -30,13 +30,47 @@
 
 #include "cpu.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Condition NegateCondition(Condition cc) {
   return static_cast<Condition>(cc ^ 1);
 }
 
 
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+#define EMIT(x)                                 \
+  *pc_++ = (x)
+
+
+void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+  EMIT(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_64(Register reg, const Operand& op) {
+  EMIT(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::set_target_address_at(byte* location, byte* value) {
+  UNIMPLEMENTED();
+}
+
+
+byte* Assembler::target_address_at(byte* location) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+#undef EMIT
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
 // The modes possibly affected by apply must be in kApplyMask.
 void RelocInfo::apply(int delta) {
   if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
@@ -71,19 +105,6 @@
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
 }
-
-
-void Assembler::set_target_address_at(byte* location, byte* value) {
-  UNIMPLEMENTED();
-}
-
-
-byte* Assembler::target_address_at(byte* location) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return *reinterpret_cast<Object**>(pc_);
@@ -148,6 +169,40 @@
   return reinterpret_cast<Object**>(pc_ + 1);
 }
 
+
+void Operand::set_modrm(int mod, Register rm) {
+  ASSERT((mod & -4) == 0);
+  buf_[0] = mod << 6 | (rm.code() & 0x7);
+  // Set REX.B to the high bit of rm.code().
+  rex_ |= (rm.code() >> 3);
+  len_ = 1;
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+  ASSERT(len_ == 1);
+  ASSERT((scale & -4) == 0);
+  // Use SIB with no index register only for base rsp or r12.
+  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
+  buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
+  rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
+  len_ = 2;
+}
+
+
+void Operand::set_disp32(int32_t disp) {
+  ASSERT(len_ == 1 || len_ == 2);
+  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+  *p = disp;
+  len_ += sizeof(int32_t);
+}
+
+
+Operand::Operand(Register reg) {
+  // reg
+  set_modrm(3, reg);
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 0efb34a..d1b4d46 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -29,16 +29,366 @@
 
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 Register no_reg = { -1 };
+Register rax = { 0 };
+Register rcx = { 1 };
+Register rsi = { 7 };
 
 
+// Safe default is no features.
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::enabled_ = 0;
+
+void CpuFeatures::Probe()  {
+  // TODO(X64): UNIMPLEMENTED
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Emit a single byte. Must always be inlined.
+#define EMIT(x)                                 \
+  *pc_++ = (x)
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+  if (buffer == NULL) {
+    // do our own buffer management
+    if (buffer_size <= kMinimalBufferSize) {
+      buffer_size = kMinimalBufferSize;
+
+      if (spare_buffer_ != NULL) {
+        buffer = spare_buffer_;
+        spare_buffer_ = NULL;
+      }
+    }
+    if (buffer == NULL) {
+      buffer_ = NewArray<byte>(buffer_size);
+    } else {
+      buffer_ = static_cast<byte*>(buffer);
+    }
+    buffer_size_ = buffer_size;
+    own_buffer_ = true;
+  } else {
+    // use externally provided buffer instead
+    ASSERT(buffer_size > 0);
+    buffer_ = static_cast<byte*>(buffer);
+    buffer_size_ = buffer_size;
+    own_buffer_ = false;
+  }
+
+  // Clear the buffer in debug mode unless it was provided by the
+  // caller in which case we can't be sure it's okay to overwrite
+  // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+  if (own_buffer_) {
+    memset(buffer_, 0xCC, buffer_size);  // int3
+  }
+#endif
+
+  // setup buffer pointers
+  ASSERT(buffer_ != NULL);
+  pc_ = buffer_;
+  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+  last_pc_ = NULL;
+  current_statement_position_ = RelocInfo::kNoPosition;
+  current_position_ = RelocInfo::kNoPosition;
+  written_statement_position_ = current_statement_position_;
+  written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+  if (own_buffer_) {
+    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+      spare_buffer_ = buffer_;
+    } else {
+      DeleteArray(buffer_);
+    }
+  }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // finalize code
+  // (at this point overflow() may be true, but the gap ensures that
+  // we are still not overlapping instructions and relocation info)
+  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
+  // setup desc
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+
+  Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+  ASSERT(IsPowerOf2(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+void Assembler::RecordComment(char const* a) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::RecordPosition(int a) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::RecordStatementPosition(int a) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::bind(Label* a) {
+  UNIMPLEMENTED();
+}
+
+void Assembler::GrowBuffer() {
+  ASSERT(overflow());  // should not call this otherwise
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // compute new buffer size
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4*KB) {
+    desc.buffer_size = 4*KB;
+  } else {
+    desc.buffer_size = 2*buffer_size_;
+  }
+  // Some internal data structures overflow for very large buffers,
+  // they must ensure that kMaximalBufferSize is not too large.
+  if ((desc.buffer_size > kMaximalBufferSize) ||
+      (desc.buffer_size > Heap::OldGenerationSize())) {
+    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+  }
+
+  // setup new buffer
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+  // Clear the buffer in debug mode. Use 'int3' instructions to make
+  // sure to get into problems if we ever run uninitialized code.
+#ifdef DEBUG
+  memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+  // copy the data
+  int pc_delta = desc.buffer - buffer_;
+  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(rc_delta + reloc_info_writer.pos(),
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // switch buffers
+  if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+    spare_buffer_ = buffer_;
+  } else {
+    DeleteArray(buffer_);
+  }
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  if (last_pc_ != NULL) {
+    last_pc_ += pc_delta;
+  }
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // relocate runtime entries
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      *p -= pc_delta;  // relocate entry
+    } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+      if (*p != 0) {  // 0 means uninitialized.
+        *p += pc_delta;
+      }
+    }
+  }
+
+  ASSERT(!overflow());
+}
+
+
+void Assembler::emit_operand(Register reg, const Operand& adr) {
+  const unsigned length = adr.len_;
+  ASSERT(length > 0);
+
+  // Emit updated ModRM byte containing the given register.
+  pc_[0] = (adr.buf_[0] & ~0x38) | ((reg.code() && 0x7) << 3);
+
+  // Emit the rest of the encoded operand.
+  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+  pc_ += length;
+}
+
+
+void Assembler::add(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  EMIT(0x03);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::add(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  EMIT(0x03);
+  EMIT(0xC0 | (src.code() & 0x7) << 3 | (dst.code() & 0x7));
+}
+
+
+void Assembler::dec(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rcx, dst);
+  EMIT(0xFF);
+  EMIT(0xC8 | (dst.code() & 0x7));
+}
+
+
+void Assembler::dec(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rax, dst);
+  EMIT(0xFF);
+  emit_operand(rcx, dst);
+}
+
+
+void Assembler::hlt() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF4);
+}
+
+
+void Assembler::inc(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rax, dst);
+  EMIT(0xFF);
+  EMIT(0xC0 | (dst.code() & 0x7));
+}
+
+
+void Assembler::inc(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rax, dst);
+  EMIT(0xFF);
+  emit_operand(rax, dst);
+}
+
+
+void Assembler::int3() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xCC);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  EMIT(0x8B);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::mov(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  EMIT(0x89);
+  EMIT(0xC0 | (src.code() & 0x7) << 3 | (dst.code() & 0x7));
+}
+
+
+void Assembler::nop() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x90);
+}
+
+void Assembler::pop(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() & 0x8) {
+    emit_rex_64(rax, dst);
+  }
+  EMIT(0x58 | (dst.code() & 0x7));
+}
+
+
+void Assembler::pop(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rax, dst);  // Could be omitted in some cases.
+  EMIT(0x8F);
+  emit_operand(rax, dst);
+}
+
+
+void Assembler::push(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.code() & 0x8) {
+    emit_rex_64(rax, src);
+  }
+  EMIT(0x50 | (src.code() & 0x7));
+}
+
+
+void Assembler::push(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(rsi, src);  // Could be omitted in some cases.
+  EMIT(0xFF);
+  emit_operand(rsi, src);
+}
+
+
+void Assembler::ret(int imm16) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint16(imm16));
+  if (imm16 == 0) {
+    EMIT(0xC3);
+  } else {
+    EMIT(0xC2);
+    EMIT(imm16 & 0xFF);
+    EMIT((imm16 >> 8) & 0xFF);
+  }
+}
+
 } }  // namespace v8::internal
 
 
 // TODO(x64): Implement and move these to their correct cc-files:
-#include "assembler.h"
 #include "ast.h"
 #include "bootstrapper.h"
 #include "codegen-inl.h"
@@ -63,7 +413,8 @@
 #include "stub-cache.h"
 #include "unicode.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
   UNIMPLEMENTED();
@@ -77,33 +428,6 @@
   UNIMPLEMENTED();
 }
 
-Assembler::Assembler(void* a, int b) {
-  UNIMPLEMENTED();
-}
-
-void Assembler::GetCode(CodeDesc* a) {
-  UNIMPLEMENTED();
-}
-
-void Assembler::RecordComment(char const* a) {
-  UNIMPLEMENTED();
-}
-
-void Assembler::RecordPosition(int a) {
-  UNIMPLEMENTED();
-}
-
-void Assembler::RecordStatementPosition(int a) {
-  UNIMPLEMENTED();
-}
-
-void Assembler::bind(Label* a) {
-  UNIMPLEMENTED();
-}
-
-Assembler::~Assembler()  {
-  UNIMPLEMENTED();
-}
 
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
   UNIMPLEMENTED();
@@ -118,10 +442,6 @@
   UNIMPLEMENTED();
 }
 
-void CEntryStub::GenerateBody(MacroAssembler* a, bool b) {
-  UNIMPLEMENTED();
-}
-
 void CallIC::Generate(MacroAssembler* a, int b, ExternalReference const& c) {
   UNIMPLEMENTED();
 }
@@ -137,7 +457,8 @@
 Object* CallStubCompiler::CompileCallConstant(Object* a,
                                               JSObject* b,
                                               JSFunction* c,
-                                              StubCompiler::CheckType d) {
+                                              StubCompiler::CheckType d,
+                                              Code::Flags flags) {
   UNIMPLEMENTED();
   return NULL;
 }
@@ -145,7 +466,8 @@
 Object* CallStubCompiler::CompileCallField(Object* a,
                                            JSObject* b,
                                            int c,
-                                           String* d) {
+                                           String* d,
+                                           Code::Flags flags) {
   UNIMPLEMENTED();
   return NULL;
 }
@@ -157,247 +479,13 @@
   return NULL;
 }
 
-CodeGenerator::CodeGenerator(int buffer_size,
-                             Handle<Script> script,
-                             bool is_eval)
-    : is_eval_(is_eval),
-      script_(script),
-      deferred_(8),
-      masm_(new MacroAssembler(NULL, buffer_size)),
-      scope_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenCode(FunctionLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
-                                                    int b,
-                                                    int c,
-                                                    Label* d,
-                                                    Vector<Label*> e,
-                                                    Vector<Label> f) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitBlock(Block* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitDeclaration(Declaration* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitIfStatement(IfStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitForInStatement(ForInStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitTryCatch(TryCatch* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitTryFinally(TryFinally* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitConditional(Conditional* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitSlot(Slot* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitLiteral(Literal* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitAssignment(Assignment* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitThrow(Throw* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitProperty(Property* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCall(Call* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCallEval(CallEval* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCallNew(CallNew* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCountOperation(CountOperation* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::VisitThisFunction(ThisFunction* a) {
-  UNIMPLEMENTED();
-}
-
-void CpuFeatures::Probe()  {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
 
 StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
                                                     StackFrame::State* b) {
-  UNIMPLEMENTED();
+  // TODO(X64): UNIMPLEMENTED
   return NONE;
 }
 
-void JSEntryStub::GenerateBody(MacroAssembler* a, bool b) {
-  UNIMPLEMENTED();
-}
-
 int JavaScriptFrame::GetProvidedParametersCount() const {
   UNIMPLEMENTED();
   return 0;
@@ -415,129 +503,6 @@
   UNIMPLEMENTED();
 }
 
-void KeyedLoadIC::ClearInlinedVersion(unsigned char* a) {
-  UNIMPLEMENTED();
-}
-
-void KeyedLoadIC::Generate(MacroAssembler* a, ExternalReference const& b) {
-  UNIMPLEMENTED();
-}
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-bool KeyedLoadIC::PatchInlinedLoad(unsigned char* a, Object* b) {
-  UNIMPLEMENTED();
-  return false;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* a) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* a,
-                                                   JSObject* b,
-                                                   JSObject* c,
-                                                   AccessorInfo* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* a,
-                                                   JSObject* b,
-                                                   JSObject* c,
-                                                   Object* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadField(String* a,
-                                                JSObject* b,
-                                                JSObject* c,
-                                                int d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* a) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* a,
-                                                      JSObject* b,
-                                                      String* c) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* a) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-void KeyedStoreIC::Generate(MacroAssembler* a, ExternalReference const& b) {
-  UNIMPLEMENTED();
-}
-
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler*a) {
-  UNIMPLEMENTED();
-}
-
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* a,
-                                                  int b,
-                                                  Map* c,
-                                                  String* d) {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-void LoadIC::ClearInlinedVersion(unsigned char* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::Generate(MacroAssembler* a, ExternalReference const& b) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateArrayLength(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateMiss(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void LoadIC::GenerateStringLength(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-bool LoadIC::PatchInlinedLoad(unsigned char* a, Object* b, int c) {
-  UNIMPLEMENTED();
-  return false;
-}
 
 Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
                                               JSObject* b,
@@ -570,19 +535,6 @@
   return NULL;
 }
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-  : Assembler(buffer, size),
-    unresolved_(0),
-    generating_stub_(false),
-    allow_stub_calls_(true),
-    code_object_(Heap::undefined_value()) {
-  UNIMPLEMENTED();
-}
-
-void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
-  UNIMPLEMENTED();
-}
-
 bool RegisterAllocator::IsReserved(int a) {
   UNIMPLEMENTED();
   return false;
@@ -600,18 +552,6 @@
   return NONE;
 }
 
-void StoreIC::Generate(MacroAssembler* a, ExternalReference const& b) {
-  UNIMPLEMENTED();
-}
-
-void StoreIC::GenerateExtendStorage(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
 Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
                                                 AccessorInfo* b,
                                                 String* c) {
@@ -672,14 +612,7 @@
   UNIMPLEMENTED();
 }
 
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
-    : cgen_(cgen),
-      masm_(NULL),
-      elements_(0),
-      parameter_count_(0),
-      local_count_(0),
-      stack_pointer_(0),
-      frame_pointer_(kIllegalIndex) {
+VirtualFrame::VirtualFrame() : elements_(0) {
   UNIMPLEMENTED();
 }
 
@@ -688,34 +621,6 @@
   return NULL;
 }
 
-void Builtins::Generate_Adaptor(MacroAssembler* a, Builtins::CFunctionId b) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_FunctionApply(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_FunctionCall(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_JSConstructCall(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* a) {
-  UNIMPLEMENTED();
-}
-
 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
   UNIMPLEMENTED();
 }
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 40fcdd3..ba40eb3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -37,7 +37,8 @@
 #ifndef V8_X64_ASSEMBLER_X64_H_
 #define V8_X64_ASSEMBLER_X64_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // CPU Registers.
 //
@@ -264,34 +265,22 @@
   // disp only must always be relocated
 
   // [base + disp/r]
-  explicit Operand(Register base, intptr_t disp,
+  explicit Operand(Register base, int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
 
   // [base + index*scale + disp/r]
   explicit Operand(Register base,
                    Register index,
                    ScaleFactor scale,
-                   intptr_t disp,
+                   int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
 
   // [index*scale + disp/r]
   explicit Operand(Register index,
                    ScaleFactor scale,
-                   intptr_t disp,
+                   int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
 
-  static Operand StaticVariable(const ExternalReference& ext) {
-    return Operand(reinterpret_cast<intptr_t>(ext.address()),
-                   RelocInfo::EXTERNAL_REFERENCE);
-  }
-
-  static Operand StaticArray(Register index,
-                             ScaleFactor scale,
-                             const ExternalReference& arr) {
-    return Operand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
-                   RelocInfo::EXTERNAL_REFERENCE);
-  }
-
   // End of constructors and methods that have been moved to MemOperand.
 
  private:
@@ -309,7 +298,6 @@
   inline void set_sib(ScaleFactor scale, Register index, Register base);
   inline void set_disp8(int8_t disp);
   inline void set_disp32(int32_t disp);
-  inline void set_dispr(intptr_t disp, RelocInfo::Mode rmode);
 
   friend class Assembler;
 };
@@ -317,40 +305,28 @@
 class MemOperand : public Operand {
  public:
   // [disp/r]
-  INLINE(explicit MemOperand(intptr_t disp, RelocInfo::Mode rmode)) :
+  INLINE(explicit MemOperand(int32_t disp, RelocInfo::Mode rmode)) :
       Operand() {
     UNIMPLEMENTED();
   }
   // disp only must always be relocated
 
   // [base + disp/r]
-  explicit MemOperand(Register base, intptr_t disp,
+  explicit MemOperand(Register base, int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
 
   // [base + index*scale + disp/r]
   explicit MemOperand(Register base,
                    Register index,
                    ScaleFactor scale,
-                   intptr_t disp,
+                   int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
 
   // [index*scale + disp/r]
   explicit MemOperand(Register index,
                    ScaleFactor scale,
-                   intptr_t disp,
+                   int32_t disp,
                    RelocInfo::Mode rmode = RelocInfo::NONE);
-
-  static MemOperand StaticVariable(const ExternalReference& ext) {
-    return MemOperand(reinterpret_cast<intptr_t>(ext.address()),
-                   RelocInfo::EXTERNAL_REFERENCE);
-  }
-
-  static MemOperand StaticArray(Register index,
-                             ScaleFactor scale,
-                             const ExternalReference& arr) {
-    return MemOperand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
-                   RelocInfo::EXTERNAL_REFERENCE);
-  }
 };
 
 // -----------------------------------------------------------------------------
@@ -461,7 +437,8 @@
  private:
   // The relocation writer's position is kGap bytes below the end of
   // the generated instructions. This leaves enough space for the
-  // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+  // longest possible x64 instruction (There is a 15 byte limit on
+  // instruction length, ruling out some otherwise valid instructions) and
   // allows for a single, fast space check per instruction.
   static const int kGap = 32;
 
@@ -499,22 +476,20 @@
   // ---------------------------------------------------------------------------
   // Code generation
   //
-  // - function names correspond one-to-one to ia32 instruction mnemonics
-  // - unless specified otherwise, instructions operate on 32bit operands
-  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
-  // - instructions on 16bit (word) operands/registers have a trailing '_w'
-  // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
-  // NOTE ON INTERFACE: Currently, the interface is not very consistent
-  // in the sense that some operations (e.g. mov()) can be called in more
-  // the one way to generate the same instruction: The Register argument
-  // can in some cases be replaced with an Operand(Register) argument.
-  // This should be cleaned up and made more orthogonal. The questions
-  // is: should we always use Operands instead of Registers where an
-  // Operand is possible, or should we have a Register (overloaded) form
-  // instead? We must be careful to make sure that the selected instruction
-  // is obvious from the parameters to avoid hard-to-find code generation
-  // bugs.
+  // Function names correspond one-to-one to x64 instruction mnemonics.
+  // Unless specified otherwise, instructions operate on 64-bit operands.
+  //
+  // If we need versions of an assembly instruction that operate on different
+  // width arguments, we add a single-letter suffix specifying the width.
+  // This is done for the following instructions: mov, cmp.
+  // There are no versions of these instructions without the suffix.
+  // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
+  // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
+  // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
+  // - Instructions on 64-bit (quadword) operands/registers use 'q'.
+  //
+  // Some mnemonics, such as "and", are the same as C++ keywords.
+  // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
 
   // Insert the smallest number of nop instructions
   // possible to align the pc offset to a multiple
@@ -576,6 +551,7 @@
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
 
+  void add(Register dst, Register src);
   void add(Register dst, const Operand& src);
   void add(const Operand& dst, const Immediate& x);
 
@@ -839,6 +815,14 @@
   inline void emit(const Immediate& x);
   inline void emit_w(const Immediate& x);
 
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of both register codes.
+  inline void emit_rex_64(Register reg, Register rm_reg);
+
+  // Emits a REX prefix that encodes a 64-bit operand size and
+  // the top bit of the destination, index, and base register codes.
+  inline void emit_rex_64(Register reg, const Operand& op);
+
   // Emit the code-object-relative offset of the label's position
   inline void emit_code_relative_offset(Label* label);
 
@@ -877,6 +861,8 @@
   int buffer_size_;
   // True if the assembler owns the buffer, false if buffer is external.
   bool own_buffer_;
+  // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+  static byte* spare_buffer_;
 
   // code generation
   byte* pc_;  // the program counter; moves forward
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 209aa2d..3f1cd9f 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -25,3 +25,41 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                Builtins::CFunctionId id) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+} }  // namespace v8::internal
+
+
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 209aa2d..3df5470 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -25,3 +25,214 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+#include "v8.h"
+#include "macro-assembler.h"
+#include "register-allocator-inl.h"
+#include "codegen.h"
+
+namespace v8 {
+namespace internal {
+
+CodeGenerator::CodeGenerator(int buffer_size,
+                             Handle<Script> script,
+                             bool is_eval)
+    : is_eval_(is_eval),
+      script_(script),
+      deferred_(8),
+      masm_(new MacroAssembler(NULL, buffer_size)),
+      scope_(NULL),
+      frame_(NULL),
+      allocator_(NULL),
+      state_(NULL),
+      loop_nesting_(0),
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
+}
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenCode(FunctionLiteral* a) {
+  masm_->int3();  // UNIMPLEMENTED
+}
+
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
+                                                    int b,
+                                                    int c,
+                                                    Label* d,
+                                                    Vector<Label*> e,
+                                                    Vector<Label> f) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBlock(Block* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDeclaration(Declaration* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitIfStatement(IfStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitForInStatement(ForInStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryCatch(TryCatch* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryFinally(TryFinally* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitConditional(Conditional* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSlot(Slot* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLiteral(Literal* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitAssignment(Assignment* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThrow(Throw* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitProperty(Property* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCall(Call* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallEval(CallEval* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallNew(CallNew* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCountOperation(CountOperation* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThisFunction(ThisFunction* a) {
+  UNIMPLEMENTED();
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+  masm->int3();  // TODO(X64): UNIMPLEMENTED.
+}
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+  masm->int3();  // TODO(X64): UNIMPLEMENTED.
+}
+
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 98117d2..374cf83 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -28,7 +28,8 @@
 #ifndef V8_X64_CODEGEN_X64_H_
 #define V8_X64_CODEGEN_X64_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declarations
 class DeferredCode;
@@ -485,8 +486,7 @@
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc);
-  Handle<Code> ComputeCallInitializeInLoop(int argc);
+  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index ab996d1..8df0ab7 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -32,7 +32,8 @@
 #include "cpu.h"
 #include "macro-assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 void CPU::Setup() {
   CpuFeatures::Probe();
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 209aa2d..3b10132 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -25,3 +25,59 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+} }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index f4468f6..345e33a 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -28,7 +28,8 @@
 #ifndef V8_X64_FRAMES_X64_H_
 #define V8_X64_FRAMES_X64_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
 // This will all need to change to be correct for x64.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 209aa2d..71a3a9a 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -25,3 +25,152 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  UNIMPLEMENTED();
+}
+
+void KeyedLoadIC::Generate(MacroAssembler* masm,
+                           ExternalReference const& f) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* callback) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* object,
+                                                JSObject* holder,
+                                                int index) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+                                                  int index,
+                                                  Map* transition,
+                                                  String* name) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  UNIMPLEMENTED();
+}
+
+void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  masm->int3();  // UNIMPLEMENTED.
+}
+
+} }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 209aa2d..5e6dd3d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -25,3 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+  : Assembler(buffer, size),
+    unresolved_(0),
+    generating_stub_(false),
+    allow_stub_calls_(true),
+    code_object_(Heap::undefined_value()) {
+}
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
+  UNIMPLEMENTED();
+}
+
+} }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 159d0c4..67c7bdd 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -30,7 +30,8 @@
 
 #include "assembler.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // Forward declaration.
 class JumpTarget;
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 0de0283..f02502b 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -29,8 +29,10 @@
 #define V8_X64_VIRTUAL_FRAME_X64_H_
 
 #include "register-allocator.h"
+#include "scopes.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 // -------------------------------------------------------------------------
 // Virtual frames
@@ -41,7 +43,7 @@
 // as random access to the expression stack elements, locals, and
 // parameters.
 
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
  public:
   // A utility class to introduce a scope where the virtual frame is
   // expected to remain spilled.  The constructor spills the code
@@ -50,29 +52,39 @@
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
-    explicit SpilledScope(CodeGenerator* cgen);
+    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+      ASSERT(cgen()->has_valid_frame());
+      cgen()->frame()->SpillAll();
+      cgen()->set_in_spilled_code(true);
+    }
 
-    ~SpilledScope();
+    ~SpilledScope() {
+      cgen()->set_in_spilled_code(previous_state_);
+    }
 
    private:
-    CodeGenerator* cgen_;
     bool previous_state_;
+
+    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   };
 
   // An illegal index into the virtual frame.
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  explicit VirtualFrame(CodeGenerator* cgen);
+  VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
   explicit VirtualFrame(VirtualFrame* original);
 
+  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+  MacroAssembler* masm() { return cgen()->masm(); }
+
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index);
 
   // The height of the virtual expression stack.
-  int height() const {
+  int height() {
     return elements_.length() - expression_base_index();
   }
 
@@ -98,7 +110,12 @@
   // match an external frame effect (examples include a call removing
   // its arguments, and exiting a try/catch removing an exception
   // handler).  No code will be emitted.
-  void Forget(int count);
+  void Forget(int count) {
+    ASSERT(count >= 0);
+    ASSERT(stack_pointer_ == elements_.length() - 1);
+    stack_pointer_ -= count;
+    ForgetElements(count);
+  }
 
   // Forget count elements from the top of the frame without adjusting
   // the stack pointer downward.  This is used, for example, before
@@ -109,13 +126,22 @@
   void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg);
+  void Spill(Register reg) {
+    if (is_used(reg)) SpillElementAt(register_index(reg));
+  }
 
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
   // (ie, they all have frame-external references).
   Register SpillAnyRegister();
 
+  // Make this frame so that an arbitrary frame of the same height can
+  // be merged to it.  Copies and constants are removed from the
+  // topmost mergable_elements elements of the frame.  A
+  // mergable_elements of JumpTarget::kAllElements indicates constants
+  // and copies should be removed from the entire frame.
+  void MakeMergable(int mergable_elements);
+
   // Prepare this virtual frame for merging to an expected frame by
   // performing some state changes that do not require generating
   // code.  It is guaranteed that no code will be generated.
@@ -131,7 +157,7 @@
   // registers.  Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -139,12 +165,13 @@
       }
     }
   }
+
   // (Re)attach a frame to its code generator.  This informs the register
   // allocator that the frame-internal register references are active again.
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen_->allocator();
+    RegisterAllocator* cgen_allocator = cgen()->allocator();
     for (int i = 0; i < kNumRegisters; i++) {
       if (is_used(i)) {
         Register temp = { i };
@@ -166,7 +193,7 @@
   void PrepareForReturn();
 
   // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots(int count);
+  void AllocateStackSlots();
 
   // An element of the expression stack as an assembly operand.
   Operand ElementAt(int index) const {
@@ -192,9 +219,9 @@
   }
 
   // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) const {
+  Operand LocalAt(int index) {
     ASSERT(0 <= index);
-    ASSERT(index < local_count_);
+    ASSERT(index < local_count());
     return Operand(rbp, kLocal0Offset - index * kPointerSize);
   }
 
@@ -230,10 +257,10 @@
   void RestoreContextRegister();
 
   // A parameter as an assembly operand.
-  Operand ParameterAt(int index) const {
+  Operand ParameterAt(int index) {
     ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count_);
-    return Operand(rbp, (1 + parameter_count_ - index) * kPointerSize);
+    ASSERT(index < parameter_count());
+    return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
   }
 
   // Push a copy of the value of a parameter frame slot on top of the frame.
@@ -255,14 +282,17 @@
   }
 
   // The receiver frame slot.
-  Operand Receiver() const { return ParameterAt(-1); }
+  Operand Receiver() { return ParameterAt(-1); }
 
   // Push a try-catch or try-finally handler on top of the virtual frame.
   void PushTryHandler(HandlerType type);
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  Result CallStub(CodeStub* stub, int arg_count);
+  Result CallStub(CodeStub* stub, int arg_count) {
+    PrepareForCall(arg_count, arg_count);
+    return RawCallStub(stub);
+  }
 
   // Call stub that takes a single argument passed in eax.  The
   // argument is given as a result which does not have to be eax or
@@ -346,7 +376,15 @@
 
   // Pushing a result invalidates it (its contents become owned by the
   // frame).
-  void Push(Result* result);
+  void Push(Result* result) {
+    if (result->is_register()) {
+      Push(result->reg(), result->static_type());
+    } else {
+      ASSERT(result->is_constant());
+      Push(result->handle());
+    }
+    result->Unuse();
+  }
 
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
@@ -361,70 +399,69 @@
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  CodeGenerator* cgen_;
-  MacroAssembler* masm_;
-
-  List<FrameElement> elements_;
-
-  // The number of frame-allocated locals and parameters respectively.
-  int parameter_count_;
-  int local_count_;
+  ZoneList<FrameElement> elements_;
 
   // The index of the element that is at the processor's stack pointer
   // (the esp register).
   int stack_pointer_;
 
-  // The index of the element that is at the processor's frame pointer
-  // (the ebp register).
-  int frame_pointer_;
-
   // The index of the register frame element using each register, or
   // kIllegalIndex if a register is not on the frame.
   int register_locations_[kNumRegisters];
 
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count() { return cgen()->scope()->num_parameters(); }
+  int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).  The parameters, receiver, and return address
+  // are below the frame pointer.
+  int frame_pointer() { return parameter_count() + 2; }
+
   // The index of the first parameter.  The receiver lies below the first
   // parameter.
   int param0_index() const { return 1; }
 
-  // The index of the context slot in the frame.
-  int context_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 1;
-  }
+  // The index of the context slot in the frame.  It is immediately
+  // above the frame pointer.
+  int context_index() { return frame_pointer() + 1; }
 
-  // The index of the function slot in the frame.  It lies above the context
-  // slot.
-  int function_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 2;
-  }
+  // The index of the function slot in the frame.  It is above the frame
+  // pointer and the context slot.
+  int function_index() { return frame_pointer() + 2; }
 
-  // The index of the first local.  Between the parameters and the locals
-  // lie the return address, the saved frame pointer, the context, and the
-  // function.
-  int local0_index() const {
-    ASSERT(frame_pointer_ != kIllegalIndex);
-    return frame_pointer_ + 3;
-  }
+  // The index of the first local.  Between the frame pointer and the
+  // locals lie the context and the function.
+  int local0_index() { return frame_pointer() + 3; }
 
   // The index of the base of the expression stack.
-  int expression_base_index() const { return local0_index() + local_count_; }
+  int expression_base_index() { return local0_index() + local_count(); }
 
   // Convert a frame index into a frame pointer relative offset into the
   // actual stack.
-  int fp_relative(int index) const {
-    return (frame_pointer_ - index) * kPointerSize;
+  int fp_relative(int index) {
+    ASSERT(index < elements_.length());
+    ASSERT(frame_pointer() < elements_.length());  // FP is on the frame.
+    return (frame_pointer() - index) * kPointerSize;
   }
 
   // Record an occurrence of a register in the virtual frame.  This has the
   // effect of incrementing the register's external reference count and
   // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index);
+  void Use(Register reg, int index) {
+    ASSERT(!is_used(reg));
+    register_locations_[reg.code()] = index;
+    cgen()->allocator()->Use(reg);
+  }
 
   // Record that a register reference has been dropped from the frame.  This
   // decrements the register's external reference count and invalidates the
   // index of the register's location in the frame.
-  void Unuse(Register reg);
+  void Unuse(Register reg) {
+    ASSERT(register_locations_[reg.code()] != kIllegalIndex);
+    register_locations_[reg.code()] = kIllegalIndex;
+    cgen()->allocator()->Unuse(reg);
+  }
 
   // Spill the element at a particular index---write it to memory if
   // necessary, free any associated register, and forget its value if
@@ -503,6 +540,7 @@
   friend class JumpTarget;
 };
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_VIRTUAL_FRAME_X64_H_
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 69b9a0a..9af6251 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -31,7 +31,8 @@
 #include "zone.h"
 #include "v8-counters.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 inline void* Zone::New(int size) {
diff --git a/src/zone.cc b/src/zone.cc
index c8f9c85..d78c19b 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -29,7 +29,8 @@
 
 #include "zone-inl.h"
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 Address Zone::position_ = 0;
diff --git a/src/zone.h b/src/zone.h
index fe66caf..a8b26e9 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -28,7 +28,8 @@
 #ifndef V8_ZONE_H_
 #define V8_ZONE_H_
 
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
 
 
 // Zone scopes are in one of two modes.  Either they delete the zone