Improved code generation infrastructure by doing simple register allocation and constant folding and propagation.

Optimized regular expression matching by avoiding the creation of intermediate string arrays and by flattening nested array representations of RegExp data.

Traversed a few stack frames when recording profiler samples to include partial call graphs in the profiling output.

Added support for using OProfile to profile generated code.

Added remote debugging support to the D8 developer shell.

Optimized creation of nested literals like JSON objects.

Fixed a bug in garbage collecting unused maps and turned it on by default (--collect-maps).

Added support for running tests under Valgrind.


git-svn-id: http://v8.googlecode.com/svn/trunk@1495 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 5d963d0..c9fd144 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+2009-03-12: Version 1.1.0
+
+        Improved code generation infrastructure by doing simple register
+        allocation and constant folding and propagation.
+
+        Optimized regular expression matching by avoiding to create
+        intermediate string arrays and by flattening nested array
+        representations of RegExp data.
+
+        Traverse a few stack frames when recording profiler samples to
+        include partial call graphs in the profiling output.
+
+        Added support for using OProfile to profile generated code.
+
+        Added remote debugging support to the D8 developer shell.
+
+        Optimized creation of nested literals like JSON objects.
+
+        Fixed a bug in garbage collecting unused maps and turned it on by
+        default (--collect-maps).
+
+        Added support for running tests under Valgrind.
+
+
 2009-02-27: Version 1.0.3
 
         Optimized double-to-integer conversions in bit operations by using
diff --git a/SConstruct b/SConstruct
index 43293e6..64e6963 100644
--- a/SConstruct
+++ b/SConstruct
@@ -68,6 +68,9 @@
     'wordsize:64': {
       'CCFLAGS':      ['-m32'],
       'LINKFLAGS':    ['-m32']
+    },
+    'prof:oprofile': {
+      'CPPDEFINES':   ['ENABLE_OPROFILE_AGENT']
     }
   },
   'msvc': {
@@ -129,7 +132,8 @@
       'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
     },
     'library:shared': {
-      'CPPDEFINES':   ['BUILDING_V8_SHARED']
+      'CPPDEFINES':   ['BUILDING_V8_SHARED'],
+      'LIBS': ['winmm', 'ws2_32']
     },
     'arch:arm': {
       'CPPDEFINES':   ['ARM'],
@@ -249,6 +253,10 @@
     },
     'mode:debug': {
       'CCFLAGS':      ['-g', '-O0']
+    },
+    'prof:oprofile': {
+      'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
+      'LIBS': ['opagent']
     }
   },
   'msvc': {
@@ -362,7 +370,7 @@
     'help': 'build using snapshots for faster start-up'
   },
   'prof': {
-    'values': ['on', 'off'],
+    'values': ['on', 'off', 'oprofile'],
     'default': 'off',
     'help': 'enable profiling of build target'
   },
@@ -435,6 +443,8 @@
     return False
   if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
     Abort("Profiling on windows only supported for static library.")
+  if env['prof'] == 'oprofile' and env['os'] != 'linux':
+    Abort("OProfile is only supported on Linux.")
   for (name, option) in SIMPLE_OPTIONS.iteritems():
     if (not option.get('default')) and (name not in ARGUMENTS):
       message = ("A value for option %s must be specified (%s)." %
diff --git a/include/v8-debug.h b/include/v8-debug.h
index c32c7f5..42bb24f 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -158,6 +158,13 @@
   */
   static Handle<Value> Call(v8::Handle<v8::Function> fun,
                             Handle<Value> data = Handle<Value>());
+
+ /**
+  * Enable the V8 builtin debug agent. The debugger agent will listen on the
+  * supplied TCP/IP port for remote debugger connection.
+  * \param port the TCP/IP port to listen on
+  */
+  static bool EnableAgent(int port);
 };
 
 
diff --git a/include/v8.h b/include/v8.h
index a048acd..44d0307 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -524,6 +524,11 @@
    * Runs the script returning the resulting value.
    */
   Local<Value> Run();
+
+  /**
+   * Returns the script id value.
+   */
+  Local<Value> Id();
 };
 
 
diff --git a/src/SConscript b/src/SConscript
index 34d2a44..67f7889 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -39,25 +39,32 @@
     'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
     'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
     'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
-    'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc', 'frames.cc',
-    'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', 'ic.cc',
-    'interpreter-irregexp.cc', 'jsregexp.cc', 'log.cc', 'mark-compact.cc',
-    'messages.cc', 'objects.cc', 'parser.cc', 'property.cc',
-    'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
-    'regexp-stack.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
+    'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
+    'flags.cc', 'frames.cc', 'global-handles.cc', 'handles.cc', 'hashmap.cc',
+    'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
+    'jump-target.cc', 'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc',
+    'oprofile-agent.cc', 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
+    'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
+    'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
     'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
     'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
-    'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'v8.cc',
-    'v8threads.cc', 'variables.cc', 'zone.cc'
+    'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
+    'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc'
   ],
-  'arch:arm':  ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc',
-      'cpu-arm.cc', 'debug-arm.cc', 'disasm-arm.cc', 'frames-arm.cc',
-      'ic-arm.cc', 'macro-assembler-arm.cc', 'regexp-macro-assembler-arm.cc', 
-      'stub-cache-arm.cc'],
-  'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
-      'cpu-ia32.cc', 'debug-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc',
-      'ic-ia32.cc', 'macro-assembler-ia32.cc', 'regexp-macro-assembler-ia32.cc',
-      'stub-cache-ia32.cc'],
+  'arch:arm': [
+    'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc',
+    'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc',
+    'jump-target-arm.cc', 'macro-assembler-arm.cc',
+    'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc',
+    'stub-cache-arm.cc', 'virtual-frame-arm.cc'
+  ],
+  'arch:ia32': [
+    'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
+    'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc',
+    'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc',
+    'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc',
+    'stub-cache-ia32.cc', 'virtual-frame-ia32.cc'
+  ],
   'simulator:arm': ['simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc'],
   'os:linux':   ['platform-linux.cc'],
diff --git a/src/accessors.cc b/src/accessors.cc
index 901dc07..d779eb2 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -198,6 +198,24 @@
 
 
 //
+// Accessors::ScriptId
+//
+
+
+Object* Accessors::ScriptGetId(Object* object, void*) {
+  Object* script = JSValue::cast(object)->value();
+  return Script::cast(script)->id();
+}
+
+
+const AccessorDescriptor Accessors::ScriptId = {
+  ScriptGetId,
+  IllegalSetter,
+  0
+};
+
+
+//
 // Accessors::ScriptLineOffset
 //
 
@@ -257,9 +275,10 @@
 
 
 Object* Accessors::ScriptGetLineEnds(Object* object, void*) {
-  Object* script = JSValue::cast(object)->value();
-  Script::cast(script)->InitLineEnds();
-  return Script::cast(script)->line_ends();
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+  InitScriptLineEnds(script);
+  return script->line_ends();
 }
 
 
diff --git a/src/accessors.h b/src/accessors.h
index 0175c9a..938b014 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -42,6 +42,7 @@
   V(StringLength)        \
   V(ScriptSource)        \
   V(ScriptName)          \
+  V(ScriptId)            \
   V(ScriptLineOffset)    \
   V(ScriptColumnOffset)  \
   V(ScriptType)          \
@@ -79,6 +80,7 @@
   static Object* ArrayGetLength(Object* object, void*);
   static Object* StringGetLength(Object* object, void*);
   static Object* ScriptGetName(Object* object, void*);
+  static Object* ScriptGetId(Object* object, void*);
   static Object* ScriptGetSource(Object* object, void*);
   static Object* ScriptGetLineOffset(Object* object, void*);
   static Object* ScriptGetColumnOffset(Object* object, void*);
diff --git a/src/api.cc b/src/api.cc
index 4d9d4f4..ac960bd 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -39,7 +39,6 @@
 #include "v8threads.h"
 
 
-namespace i = v8::internal;
 #define LOG_API(expr) LOG(ApiEntryCall(expr))
 
 
@@ -1055,6 +1054,22 @@
 }
 
 
+Local<Value> Script::Id() {
+  ON_BAILOUT("v8::Script::Id()", return Local<Value>());
+  LOG_API("Script::Id");
+  i::Object* raw_id = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::Object> id(script->id());
+    raw_id = *id;
+  }
+  i::Handle<i::Object> id(raw_id);
+  return Utils::ToLocal(id);
+}
+
+
 // --- E x c e p t i o n s ---
 
 
@@ -2185,7 +2200,7 @@
 
 
 const char* v8::V8::GetVersion() {
-  return "1.0.3.6";
+  return "1.1.0";
 }
 
 
@@ -2961,6 +2976,11 @@
 }
 
 
+bool Debug::EnableAgent(int port) {
+  return i::Debugger::StartAgent(port);
+}
+
+
 namespace internal {
 
 
diff --git a/src/assembler-arm.h b/src/assembler-arm.h
index d8aa068..48b20ee 100644
--- a/src/assembler-arm.h
+++ b/src/assembler-arm.h
@@ -83,6 +83,8 @@
 };
 
 
+const int kNumRegisters = 16;
+
 extern Register no_reg;
 extern Register r0;
 extern Register r1;
@@ -211,6 +213,15 @@
 }
 
 
+// Branch hints are not used on the ARM.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the arm.  Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
 // The pc store offset may be 8 or 12 depending on the processor implementation.
 int PcStoreOffset();
 
diff --git a/src/assembler-ia32-inl.h b/src/assembler-ia32-inl.h
index 57e783f..c5213a7 100644
--- a/src/assembler-ia32-inl.h
+++ b/src/assembler-ia32-inl.h
@@ -279,7 +279,8 @@
 
 void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
   ASSERT(len_ == 1 || len_ == 2);
-  *reinterpret_cast<int32_t*>(&buf_[len_]) = disp;
+  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+  *p = disp;
   len_ += sizeof(int32_t);
   rmode_ = rmode;
 }
diff --git a/src/assembler-ia32.cc b/src/assembler-ia32.cc
index f57229f..ee51cb4 100644
--- a/src/assembler-ia32.cc
+++ b/src/assembler-ia32.cc
@@ -751,6 +751,18 @@
 }
 
 
+void Assembler::xchg(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding
+    EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
+  } else {
+    EMIT(0x87);
+    EMIT(0xC0 | src.code() << 3 | dst.code());
+  }
+}
+
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1847,6 +1859,16 @@
 }
 
 
+void Assembler::setcc(Condition cc, Register reg) {
+  ASSERT(reg.is_byte_register());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x90 | cc);
+  EMIT(0xC0 | reg.code());
+}
+
+
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
   EnsureSpace ensure_space(this);
diff --git a/src/assembler-ia32.h b/src/assembler-ia32.h
index 4a88b9d..a664093 100644
--- a/src/assembler-ia32.h
+++ b/src/assembler-ia32.h
@@ -63,6 +63,8 @@
 struct Register {
   bool is_valid() const  { return 0 <= code_ && code_ < 8; }
   bool is(Register reg) const  { return code_ == reg.code_; }
+  // eax, ebx, ecx and edx are byte registers, the rest are not.
+  bool is_byte_register() const  { return code_ <= 3; }
   int code() const  {
     ASSERT(is_valid());
     return code_;
@@ -76,6 +78,8 @@
   int code_;
 };
 
+const int kNumRegisters = 8;
+
 extern Register eax;
 extern Register ecx;
 extern Register edx;
@@ -173,6 +177,15 @@
   taken = 0x3e
 };
 
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition.  That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+  return (hint == no_hint)
+      ? no_hint
+      : ((hint == not_taken) ? taken : not_taken);
+}
+
 
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
@@ -494,6 +507,9 @@
   void cmov(Condition cc, Register dst, Handle<Object> handle);
   void cmov(Condition cc, Register dst, const Operand& src);
 
+  // Exchange two registers
+  void xchg(Register dst, Register src);
+
   // Arithmetics
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
@@ -674,6 +690,7 @@
   void frndint();
 
   void sahf();
+  void setcc(Condition cc, Register reg);
 
   void cpuid();
 
diff --git a/src/assembler.h b/src/assembler.h
index 8d21889..49c9b90 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -48,7 +48,7 @@
 // unknown pc location. Assembler::bind() is used to bind a label to the
 // current pc. A label can be bound only once.
 
-class Label : public ZoneObject {  // LabelShadows are dynamically allocated.
+class Label BASE_EMBEDDED {
  public:
   INLINE(Label())                 { Unuse(); }
   INLINE(~Label())                { ASSERT(!is_linked()); }
@@ -84,58 +84,11 @@
   friend class Assembler;
   friend class RegexpAssembler;
   friend class Displacement;
-  friend class LabelShadow;
+  friend class ShadowTarget;
   friend class RegExpMacroAssemblerIrregexp;
 };
 
 
-// A LabelShadow represents a label that is temporarily shadowed by another
-// label (represented by the original label during shadowing). They are used
-// to catch jumps to labels in certain contexts, e.g. try blocks.  After
-// shadowing ends, the formerly shadowed label is again represented by the
-// original label and the LabelShadow can be used as a label in its own
-// right, representing the formerly shadowing label.
-class LabelShadow : public Label {
- public:
-  explicit LabelShadow(Label* original) {
-    ASSERT(original != NULL);
-    original_label_ = original;
-    original_pos_ = original->pos_;
-    original->Unuse();
-#ifdef DEBUG
-    is_shadowing_ = true;
-#endif
-  }
-
-  ~LabelShadow() {
-    ASSERT(!is_shadowing_);
-  }
-
-  void StopShadowing() {
-    ASSERT(is_shadowing_ && is_unused());
-    pos_ = original_label_->pos_;
-    original_label_->pos_ = original_pos_;
-#ifdef DEBUG
-    is_shadowing_ = false;
-#endif
-  }
-
-  Label* original_label() const { return original_label_; }
-
- private:
-  // During shadowing, the currently shadowing label.  After shadowing, the
-  // label that was shadowed.
-  Label* original_label_;
-
-  // During shadowing, the saved state of the original label.
-  int original_pos_;
-
-#ifdef DEBUG
-  bool is_shadowing_;
-#endif
-};
-
-
 // -----------------------------------------------------------------------------
 // Relocation information
 
diff --git a/src/ast.cc b/src/ast.cc
index 3197aa9..1a6010a 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -135,8 +135,12 @@
   Object* k = *key->handle();
   if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
     kind_ = PROTOTYPE;
+  } else if (value_->AsMaterializedLiteral() != NULL) {
+    kind_ = MATERIALIZED_LITERAL;
+  } else if (value_->AsLiteral() != NULL) {
+    kind_ = CONSTANT;
   } else {
-    kind_ = value_->AsLiteral() == NULL ? COMPUTED : CONSTANT;
+    kind_ = COMPUTED;
   }
 }
 
@@ -148,13 +152,13 @@
 }
 
 
-void LabelCollector::AddLabel(Label* label) {
+void TargetCollector::AddTarget(BreakTarget* target) {
   // Add the label to the collector, but discard duplicates.
-  int length = labels_->length();
+  int length = targets_->length();
   for (int i = 0; i < length; i++) {
-    if (labels_->at(i) == label) return;
+    if (targets_->at(i) == target) return;
   }
-  labels_->Add(label);
+  targets_->Add(target);
 }
 
 
diff --git a/src/ast.h b/src/ast.h
index c0fa414..66feecf 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -35,6 +35,7 @@
 #include "variables.h"
 #include "macro-assembler.h"
 #include "jsregexp.h"
+#include "jump-target.h"
 
 namespace v8 { namespace internal {
 
@@ -92,6 +93,10 @@
   V(ThisFunction)
 
 
+// Forward declarations
+class TargetCollector;
+class MaterializedLiteral;
+
 #define DEF_FORWARD_DECLARATION(type) class type;
 NODE_LIST(DEF_FORWARD_DECLARATION)
 #undef DEF_FORWARD_DECLARATION
@@ -118,13 +123,16 @@
   virtual VariableProxy* AsVariableProxy() { return NULL; }
   virtual Property* AsProperty() { return NULL; }
   virtual Call* AsCall() { return NULL; }
-  virtual LabelCollector* AsLabelCollector() { return NULL; }
+  virtual TargetCollector* AsTargetCollector() { return NULL; }
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual UnaryOperation* AsUnaryOperation() { return NULL; }
   virtual BinaryOperation* AsBinaryOperation() { return NULL; }
   virtual Assignment* AsAssignment() { return NULL; }
   virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
+  virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
+  virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
+  virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
   int statement_pos() const { return statement_pos_; }
@@ -192,12 +200,7 @@
   virtual BreakableStatement* AsBreakableStatement() { return this; }
 
   // Code generation
-  Label* break_target() { return &break_target_; }
-
-  // Used during code generation for restoring the stack when a
-  // break/continue crosses a statement that keeps stuff on the stack.
-  int break_stack_height() { return break_stack_height_; }
-  void set_break_stack_height(int height) { break_stack_height_ = height; }
+  BreakTarget* break_target() { return &break_target_; }
 
   // Testers.
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
@@ -211,8 +214,7 @@
  private:
   ZoneStringList* labels_;
   Type type_;
-  Label break_target_;
-  int break_stack_height_;
+  BreakTarget break_target_;
 };
 
 
@@ -268,7 +270,7 @@
   Statement* body() const { return body_; }
 
   // Code generation
-  Label* continue_target()  { return &continue_target_; }
+  BreakTarget* continue_target()  { return &continue_target_; }
 
  protected:
   explicit IterationStatement(ZoneStringList* labels)
@@ -280,7 +282,7 @@
 
  private:
   Statement* body_;
-  Label continue_target_;
+  BreakTarget continue_target_;
 };
 
 
@@ -443,10 +445,12 @@
     CHECK(!is_default());
     return label_;
   }
+  JumpTarget* body_target() { return &body_target_; }
   ZoneList<Statement*>* statements() const  { return statements_; }
 
  private:
   Expression* label_;
+  JumpTarget body_target_;
   ZoneList<Statement*>* statements_;
 };
 
@@ -503,43 +507,45 @@
 };
 
 
-// NOTE: LabelCollectors are represented as nodes to fit in the target
+// NOTE: TargetCollectors are represented as nodes to fit in the target
 // stack in the compiler; this should probably be reworked.
-class LabelCollector: public Node {
+class TargetCollector: public Node {
  public:
-  explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
+  explicit TargetCollector(ZoneList<BreakTarget*>* targets)
+      : targets_(targets) {
+  }
 
-  // Adds a label to the collector. The collector stores a pointer not
-  // a copy of the label to make binding work, so make sure not to
-  // pass in references to something on the stack.
-  void AddLabel(Label* label);
+  // Adds a jump target to the collector. The collector stores a pointer not
+  // a copy of the target to make binding work, so make sure not to pass in
+  // references to something on the stack.
+  void AddTarget(BreakTarget* target);
 
-  // Virtual behaviour. LabelCollectors are never part of the AST.
+  // Virtual behaviour. TargetCollectors are never part of the AST.
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
-  virtual LabelCollector* AsLabelCollector() { return this; }
+  virtual TargetCollector* AsTargetCollector() { return this; }
 
-  ZoneList<Label*>* labels() { return labels_; }
+  ZoneList<BreakTarget*>* targets() { return targets_; }
 
  private:
-  ZoneList<Label*>* labels_;
+  ZoneList<BreakTarget*>* targets_;
 };
 
 
 class TryStatement: public Statement {
  public:
   explicit TryStatement(Block* try_block)
-      : try_block_(try_block), escaping_labels_(NULL) { }
+      : try_block_(try_block), escaping_targets_(NULL) { }
 
-  void set_escaping_labels(ZoneList<Label*>* labels) {
-    escaping_labels_ = labels;
+  void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
+    escaping_targets_ = targets;
   }
 
   Block* try_block() const { return try_block_; }
-  ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
+  ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
 
  private:
   Block* try_block_;
-  ZoneList<Label*>* escaping_labels_;
+  ZoneList<BreakTarget*>* escaping_targets_;
 };
 
 
@@ -624,11 +630,20 @@
 // Base class for literals that needs space in the corresponding JSFunction.
 class MaterializedLiteral: public Expression {
  public:
-  explicit MaterializedLiteral(int literal_index)
-      : literal_index_(literal_index) {}
+  explicit MaterializedLiteral(int literal_index, bool is_simple)
+      : literal_index_(literal_index), is_simple_(is_simple) {}
+
+  virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
+
   int literal_index() { return literal_index_; }
+
+  // A materialized literal is simple if the values consist of only
+  // constants and simple object and array literals.
+  bool is_simple() const { return is_simple_; }
+
  private:
   int literal_index_;
+  bool is_simple_;
 };
 
 
@@ -643,10 +658,11 @@
    public:
 
     enum Kind {
-      CONSTANT,       // Property with constant value (at compile time).
-      COMPUTED,       // Property with computed value (at execution time).
-      GETTER, SETTER,  // Property is an accessor function.
-      PROTOTYPE       // Property is __proto__.
+      CONSTANT,              // Property with constant value (compile time).
+      COMPUTED,              // Property with computed value (execution time).
+      MATERIALIZED_LITERAL,  // Property value is a materialized literal.
+      GETTER, SETTER,        // Property is an accessor function.
+      PROTOTYPE              // Property is __proto__.
     };
 
     Property(Literal* key, Expression* value);
@@ -664,12 +680,13 @@
 
   ObjectLiteral(Handle<FixedArray> constant_properties,
                 ZoneList<Property*>* properties,
-                int literal_index)
-      : MaterializedLiteral(literal_index),
+                int literal_index,
+                bool is_simple)
+      : MaterializedLiteral(literal_index, is_simple),
         constant_properties_(constant_properties),
-        properties_(properties) {
-  }
+        properties_(properties) {}
 
+  virtual ObjectLiteral* AsObjectLiteral() { return this; }
   virtual void Accept(AstVisitor* v);
 
   Handle<FixedArray> constant_properties() const {
@@ -689,7 +706,7 @@
   RegExpLiteral(Handle<String> pattern,
                 Handle<String> flags,
                 int literal_index)
-      : MaterializedLiteral(literal_index),
+      : MaterializedLiteral(literal_index, false),
         pattern_(pattern),
         flags_(flags) {}
 
@@ -705,14 +722,18 @@
 
 // An array literal has a literals object that is used
 // for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public Expression {
+class ArrayLiteral: public MaterializedLiteral {
  public:
   ArrayLiteral(Handle<FixedArray> literals,
-               ZoneList<Expression*>* values)
-      : literals_(literals), values_(values) {
-  }
+               ZoneList<Expression*>* values,
+               int literal_index,
+               bool is_simple)
+      : MaterializedLiteral(literal_index, is_simple),
+        literals_(literals),
+        values_(values) {}
 
   virtual void Accept(AstVisitor* v);
+  virtual ArrayLiteral* AsArrayLiteral() { return this; }
 
   Handle<FixedArray> literals() const { return literals_; }
   ZoneList<Expression*>* values() const { return values_; }
@@ -860,8 +881,13 @@
 
 class Property: public Expression {
  public:
-  Property(Expression* obj, Expression* key, int pos)
-      : obj_(obj), key_(key), pos_(pos) { }
+  // Synthetic properties are property lookups introduced by the system,
+  // to objects that aren't visible to the user. Function calls to synthetic
+  // properties should use the global object as receiver, not the base object
+  // of the resolved Reference.
+  enum Type { NORMAL, SYNTHETIC };
+  Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
+      : obj_(obj), key_(key), pos_(pos), type_(type) { }
 
   virtual void Accept(AstVisitor* v);
 
@@ -873,6 +899,7 @@
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   int position() const { return pos_; }
+  bool is_synthetic() const { return type_ == SYNTHETIC; }
 
   // Returns a property singleton property access on 'this'.  Used
   // during preparsing.
@@ -882,8 +909,9 @@
   Expression* obj_;
   Expression* key_;
   int pos_;
+  Type type_;
 
-  // Dummy property used during preparsing
+  // Dummy property used during preparsing.
   static Property this_property_;
 };
 
@@ -1179,6 +1207,9 @@
         is_expression_(is_expression),
         loop_nesting_(0),
         function_token_position_(RelocInfo::kNoPosition) {
+#ifdef DEBUG
+    already_compiled_ = false;
+#endif
   }
 
   virtual void Accept(AstVisitor* v);
@@ -1205,6 +1236,13 @@
   bool loop_nesting() const { return loop_nesting_; }
   void set_loop_nesting(int nesting) { loop_nesting_ = nesting; }
 
+#ifdef DEBUG
+  void mark_as_compiled() {
+    ASSERT(!already_compiled_);
+    already_compiled_ = true;
+  }
+#endif
+
  private:
   Handle<String> name_;
   Scope* scope_;
@@ -1218,6 +1256,9 @@
   bool is_expression_;
   int loop_nesting_;
   int function_token_position_;
+#ifdef DEBUG
+  bool already_compiled_;
+#endif
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 585d034..0a0ed83 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -518,10 +518,11 @@
   {  // --- E m p t y ---
     Handle<Code> code =
         Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
-    Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
-
     empty_function->set_code(*code);
-    empty_function->shared()->set_script(*Factory::NewScript(source));
+    Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
+    Handle<Script> script = Factory::NewScript(source);
+    script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
+    empty_function->shared()->set_script(*script);
     empty_function->shared()->set_start_position(0);
     empty_function->shared()->set_end_position(source->length());
     empty_function->shared()->DontAdaptArguments();
@@ -984,12 +985,19 @@
             Factory::LookupAsciiSymbol("source"),
             proxy_source,
             common_attributes);
-    Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptName);
+    Handle<Proxy> proxy_name = Factory::NewProxy(&Accessors::ScriptName);
     script_descriptors =
         Factory::CopyAppendProxyDescriptor(
             script_descriptors,
             Factory::LookupAsciiSymbol("name"),
-            proxy_data,
+            proxy_name,
+            common_attributes);
+    Handle<Proxy> proxy_id = Factory::NewProxy(&Accessors::ScriptId);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("id"),
+            proxy_id,
             common_attributes);
     Handle<Proxy> proxy_line_offset =
         Factory::NewProxy(&Accessors::ScriptLineOffset);
@@ -1028,6 +1036,7 @@
 
     // Allocate the empty script.
     Handle<Script> script = Factory::NewScript(Factory::empty_string());
+    script->set_type(Smi::FromInt(SCRIPT_TYPE_NATIVE));
     global_context()->set_empty_script(*script);
   }
 
diff --git a/src/builtins-ia32.cc b/src/builtins-ia32.cc
index d773c97..0e9de8c 100644
--- a/src/builtins-ia32.cc
+++ b/src/builtins-ia32.cc
@@ -77,9 +77,7 @@
     __ test(edi, Immediate(kSmiTagMask));
     __ j(zero, &rt_call);
     // Check that function is a JSFunction
-    __ mov(eax, FieldOperand(edi, JSFunction::kMapOffset));
-    __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
-    __ cmp(eax, JS_FUNCTION_TYPE);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
     __ j(not_equal, &rt_call);
 
     // Verified that the constructor is a JSFunction.
@@ -91,9 +89,7 @@
     __ j(zero, &rt_call);
     // edi: constructor
     // eax: initial map (if proven valid below)
-    __ mov(ebx, FieldOperand(eax, JSFunction::kMapOffset));
-    __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-    __ cmp(ebx, MAP_TYPE);
+    __ CmpObjectType(eax, MAP_TYPE, ebx);
     __ j(not_equal, &rt_call);
 
     // Check that the constructor is not constructing a JSFunction (see comments
@@ -101,8 +97,7 @@
     // instance type would be JS_FUNCTION_TYPE.
     // edi: constructor
     // eax: initial map
-    __ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
-    __ cmp(ebx, JS_FUNCTION_TYPE);
+    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
     __ j(equal, &rt_call);
 
     // Now allocate the JSObject on the heap.
@@ -391,9 +386,7 @@
     __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));
     __ test(edi, Immediate(kSmiTagMask));
     __ j(zero, &non_function, not_taken);
-    __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));  // get the map
-    __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-    __ cmp(ecx, JS_FUNCTION_TYPE);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
     __ j(equal, &function, taken);
 
     // Non-function called: Clear the function to force exception.
diff --git a/src/builtins.cc b/src/builtins.cc
index c4991c3..9220698 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -155,7 +155,7 @@
     Object* obj = BUILTIN_ARG(1);
     if (obj->IsSmi()) {
       int len = Smi::cast(obj)->value();
-      if (len >= 0 && len < JSObject::kMaxFastElementsLength) {
+      if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
         Object* obj = Heap::AllocateFixedArrayWithHoles(len);
         if (obj->IsFailure()) return obj;
         array->SetContent(FixedArray::cast(obj));
@@ -699,10 +699,10 @@
       // Log the event and add the code to the builtins array.
       LOG(CodeCreateEvent("Builtin", Code::cast(code), functions[i].s_name));
       builtins_[i] = code;
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_builtin_code) {
         PrintF("Builtin: %s\n", functions[i].s_name);
-        code->Print();
+        Code::cast(code)->Disassemble(functions[i].s_name);
         PrintF("\n");
       }
 #endif
diff --git a/src/checks.cc b/src/checks.cc
index 2f25456..f8a2f24 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -32,8 +32,6 @@
 #include "platform.h"
 #include "top.h"
 
-using namespace v8::internal;
-
 static int fatal_error_handler_nesting_depth = 0;
 
 // Contains protection against recursive calls (faults while handling faults).
@@ -41,21 +39,21 @@
   fatal_error_handler_nesting_depth++;
   // First time we try to print an error message
   if (fatal_error_handler_nesting_depth < 2) {
-    OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+    i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
     va_list arguments;
     va_start(arguments, format);
-    OS::VPrintError(format, arguments);
+    i::OS::VPrintError(format, arguments);
     va_end(arguments);
-    OS::PrintError("\n#\n\n");
+    i::OS::PrintError("\n#\n\n");
   }
   // First two times we may try to print a stack dump.
   if (fatal_error_handler_nesting_depth < 3) {
-    if (FLAG_stack_trace_on_abort) {
+    if (i::FLAG_stack_trace_on_abort) {
       // Call this one twice on double fault
-      Top::PrintStack();
+      i::Top::PrintStack();
     }
   }
-  OS::Abort();
+  i::OS::Abort();
 }
 
 
@@ -90,11 +88,11 @@
 
 
 void API_Fatal(const char* location, const char* format, ...) {
-  OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
+  i::OS::PrintError("\n#\n# Fatal error in %s\n# ", location);
   va_list arguments;
   va_start(arguments, format);
-  OS::VPrintError(format, arguments);
+  i::OS::VPrintError(format, arguments);
   va_end(arguments);
-  OS::PrintError("\n#\n\n");
-  OS::Abort();
+  i::OS::PrintError("\n#\n\n");
+  i::OS::Abort();
 }
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 417806f..06c4dcd 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -68,10 +68,12 @@
     LOG(CodeCreateEvent("Stub", *code, GetName()));
     Counters::total_stubs_code_size.Increment(code->instruction_size());
 
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
+#ifdef DEBUG
       Print();
-      code->Print();
+#endif
+      code->Disassemble(GetName());
       PrintF("\n");
     }
 #endif
diff --git a/src/codegen-arm.cc b/src/codegen-arm.cc
index 8a47f8c..3e5cc46 100644
--- a/src/codegen-arm.cc
+++ b/src/codegen-arm.cc
@@ -38,82 +38,6 @@
 #define __ masm_->
 
 // -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-VirtualFrame::VirtualFrame(CodeGenerator* cgen) {
-  ASSERT(cgen->scope() != NULL);
-
-  masm_ = cgen->masm();
-  frame_local_count_ = cgen->scope()->num_stack_slots();
-  parameter_count_ = cgen->scope()->num_parameters();
-}
-
-
-void VirtualFrame::Enter() {
-  Comment cmnt(masm_, "[ Enter JS frame");
-#ifdef DEBUG
-  { Label done, fail;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &fail);
-    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-    __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-    __ cmp(r2, Operand(JS_FUNCTION_TYPE));
-    __ b(eq, &done);
-    __ bind(&fail);
-    __ stop("CodeGenerator::EnterJSFrame - r1 not a function");
-    __ bind(&done);
-  }
-#endif  // DEBUG
-
-  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-  // Adjust FP to point to saved FP.
-  __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm_, "[ Exit JS frame");
-  // Drop the execution stack down to the frame pointer and restore the caller
-  // frame pointer and return address.
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateLocals() {
-  if (frame_local_count_ > 0) {
-    Comment cmnt(masm_, "[ Allocate space for locals");
-      // Initialize stack slots with 'undefined' value.
-    __ mov(ip, Operand(Factory::undefined_value()));
-    for (int i = 0; i < frame_local_count_; i++) {
-      __ push(ip);
-    }
-  }
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  if (count > 0) {
-    __ add(sp, sp, Operand(count * kPointerSize));
-  }
-}
-
-
-void VirtualFrame::Pop() { Drop(1); }
-
-
-void VirtualFrame::Pop(Register reg) {
-  __ pop(reg);
-}
-
-
-void VirtualFrame::Push(Register reg) {
-  __ push(reg);
-}
-
-
-// -------------------------------------------------------------------------
 // CodeGenState implementation.
 
 CodeGenState::CodeGenState(CodeGenerator* owner)
@@ -128,8 +52,8 @@
 
 CodeGenState::CodeGenState(CodeGenerator* owner,
                            TypeofState typeof_state,
-                           Label* true_target,
-                           Label* false_target)
+                           JumpTarget* true_target,
+                           JumpTarget* false_target)
     : owner_(owner),
       typeof_state_(typeof_state),
       true_target_(true_target),
@@ -156,17 +80,18 @@
       masm_(new MacroAssembler(NULL, buffer_size)),
       scope_(NULL),
       frame_(NULL),
+      allocator_(NULL),
       cc_reg_(al),
       state_(NULL),
-      break_stack_height_(0) {
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
 }
 
 
 // Calling conventions:
-// r0: the number of arguments
-// fp: frame pointer
+// fp: caller's frame pointer
 // sp: stack pointer
-// pp: caller's parameter pointer
+// r1: called JS function
 // cp: callee's context
 
 void CodeGenerator::GenCode(FunctionLiteral* fun) {
@@ -175,46 +100,55 @@
   // Initialize state.
   ASSERT(scope_ == NULL);
   scope_ = fun->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
   ASSERT(frame_ == NULL);
-  VirtualFrame virtual_frame(this);
-  frame_ = &virtual_frame;
+  frame_ = new VirtualFrame(this);
   cc_reg_ = al;
+  set_in_spilled_code(false);
   {
     CodeGenState state(this);
 
-    // Entry
-    // stack: function, receiver, arguments, return address
-    // r0: number of arguments
+    // Entry:
+    // Stack: receiver, arguments
+    // lr: return address
+    // fp: caller's frame pointer
     // sp: stack pointer
-    // fp: frame pointer
-    // pp: caller's parameter pointer
+    // r1: called JS function
     // cp: callee's context
-
+    allocator_->Initialize();
     frame_->Enter();
     // tos: code slot
 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
         fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      frame_->SpillAll();
       __ stop("stop-at");
     }
 #endif
 
     // Allocate space for locals and initialize them.
-    frame_->AllocateLocals();
+    frame_->AllocateStackSlots(scope_->num_stack_slots());
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
 
+    VirtualFrame::SpilledScope spilled_scope(this);
     if (scope_->num_heap_slots() > 0) {
       // Allocate local context.
       // Get outer context and create a new context based on it.
       __ ldr(r0, frame_->Function());
-      frame_->Push(r0);
-      __ CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result
+      frame_->EmitPush(r0);
+      frame_->CallRuntime(Runtime::kNewContext, 1);  // r0 holds the result
 
       if (kDebug) {
-        Label verified_true;
+        JumpTarget verified_true(this);
         __ cmp(r0, Operand(cp));
-        __ b(eq, &verified_true);
+        verified_true.Branch(eq);
         __ stop("NewContext: r0 is expected to be the same as cp");
-        __ bind(&verified_true);
+        verified_true.Bind();
       }
       // Update context local.
       __ str(cp, frame_->Context());
@@ -240,7 +174,7 @@
         Slot* slot = par->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           ASSERT(!scope_->is_global_scope());  // no parameters in global scope
-          __ ldr(r1, frame_->Parameter(i));
+          __ ldr(r1, frame_->ParameterAt(i));
           // Loads r2 with context; used below in RecordWrite.
           __ str(r1, SlotOperand(slot, r2));
           // Load the offset into r3.
@@ -267,14 +201,15 @@
           const int kReceiverDisplacement = 2 + scope_->num_parameters();
           __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
           __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+          frame_->Adjust(3);
           __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
-          __ CallStub(&stub);
-          frame_->Push(r0);
+          frame_->CallStub(&stub, 3);
+          frame_->EmitPush(r0);
           arguments_ref.SetValue(NOT_CONST_INIT);
         }
         shadow_ref.SetValue(NOT_CONST_INIT);
       }
-      frame_->Pop();  // Value is no longer needed.
+      frame_->Drop();  // Value is no longer needed.
     }
 
     // Generate code to 'execute' declarations and initialize functions
@@ -292,7 +227,7 @@
     }
 
     if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
       // Ignore the return value.
     }
     CheckStack();
@@ -307,42 +242,56 @@
       bool should_trace =
           is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
       if (should_trace) {
-        __ CallRuntime(Runtime::kDebugTrace, 0);
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
         // Ignore the return value.
       }
 #endif
-      VisitStatements(body);
+      VisitStatementsAndSpill(body);
     }
   }
 
-  // exit
-  // r0: result
-  // sp: stack pointer
-  // fp: frame pointer
-  // pp: parameter pointer
-  // cp: callee's context
-  __ mov(r0, Operand(Factory::undefined_value()));
+  // Generate the return sequence if necessary.
+  if (frame_ != NULL || function_return_.is_linked()) {
+    // exit
+    // r0: result
+    // sp: stack pointer
+    // fp: frame pointer
+    // pp: parameter pointer
+    // cp: callee's context
+    __ mov(r0, Operand(Factory::undefined_value()));
 
-  __ bind(&function_return_);
-  if (FLAG_trace) {
-    // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns the parameter as it is.
-    frame_->Push(r0);
-    __ CallRuntime(Runtime::kTraceExit, 1);
+    function_return_.Bind();
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns the parameter as it is.
+      frame_->EmitPush(r0);
+      frame_->CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+    // Tear down the frame which will restore the caller's frame pointer and
+    // the link register.
+    frame_->Exit();
+
+    __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+    __ mov(pc, lr);
   }
 
-  // Tear down the frame which will restore the caller's frame pointer and the
-  // link register.
-  frame_->Exit();
-
-  __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
-  __ mov(pc, lr);
-
   // Code generation state must be reset.
-  scope_ = NULL;
-  frame_ = NULL;
   ASSERT(!has_cc());
   ASSERT(state_ == NULL);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (HasStackOverflow()) {
+    ClearDeferred();
+  } else {
+    ProcessDeferred();
+  }
+
+  allocator_ = NULL;
+  scope_ = NULL;
 }
 
 
@@ -359,10 +308,10 @@
   int index = slot->index();
   switch (slot->type()) {
     case Slot::PARAMETER:
-      return frame_->Parameter(index);
+      return frame_->ParameterAt(index);
 
     case Slot::LOCAL:
-      return frame_->Local(index);
+      return frame_->LocalAt(index);
 
     case Slot::CONTEXT: {
       // Follow the context chain if necessary.
@@ -397,20 +346,21 @@
 }
 
 
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                            Register tmp,
-                                                            Register tmp2,
-                                                            Label* slow) {
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+    Slot* slot,
+    Register tmp,
+    Register tmp2,
+    JumpTarget* slow) {
   ASSERT(slot->type() == Slot::CONTEXT);
-  int index = slot->index();
   Register context = cp;
+
   for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
         // Check that extension is NULL.
         __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(tmp2, tmp2);
-        __ b(ne, slow);
+        slow->Branch(ne);
       }
       __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
       __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
@@ -420,9 +370,9 @@
   // Check that last extension is NULL.
   __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
   __ tst(tmp2, tmp2);
-  __ b(ne, slow);
+  slow->Branch(ne);
   __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp, index);
+  return ContextOperand(tmp, slot->index());
 }
 
 
@@ -434,89 +384,114 @@
 // test for 'true'.
 void CodeGenerator::LoadCondition(Expression* x,
                                   TypeofState typeof_state,
-                                  Label* true_target,
-                                  Label* false_target,
+                                  JumpTarget* true_target,
+                                  JumpTarget* false_target,
                                   bool force_cc) {
+  ASSERT(!in_spilled_code());
   ASSERT(!has_cc());
+  int original_height = frame_->height();
 
   { CodeGenState new_state(this, typeof_state, true_target, false_target);
     Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        has_valid_frame() &&
+        !has_cc() &&
+        frame_->height() == original_height) {
+      true_target->Jump();
+    }
   }
-  if (force_cc && !has_cc()) {
+  if (force_cc && frame_ != NULL && !has_cc()) {
     // Convert the TOS value to a boolean in the condition code register.
-    // Visiting an expression may possibly choose neither (a) to leave a
-    // value in the condition code register nor (b) to leave a value in TOS
-    // (eg, by compiling to only jumps to the targets).  In that case the
-    // code generated by ToBoolean is wrong because it assumes the value of
-    // the expression in TOS.  So long as there is always a value in TOS or
-    // the condition code register when control falls through to here (there
-    // is), the code generated by ToBoolean is dead and therefore safe.
     ToBoolean(true_target, false_target);
   }
-  ASSERT(has_cc() || !force_cc);
+  ASSERT(!force_cc || !has_valid_frame() || has_cc());
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
 }
 
 
 void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
-  Label true_target;
-  Label false_target;
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  JumpTarget true_target(this);
+  JumpTarget false_target(this);
   LoadCondition(x, typeof_state, &true_target, &false_target, false);
 
   if (has_cc()) {
-    // convert cc_reg_ into a bool
-    Label loaded, materialize_true;
-    __ b(cc_reg_, &materialize_true);
+    // Convert cc_reg_ into a boolean value.
+    JumpTarget loaded(this);
+    JumpTarget materialize_true(this);
+    materialize_true.Branch(cc_reg_);
     __ mov(r0, Operand(Factory::false_value()));
-    frame_->Push(r0);
-    __ b(&loaded);
-    __ bind(&materialize_true);
+    frame_->EmitPush(r0);
+    loaded.Jump();
+    materialize_true.Bind();
     __ mov(r0, Operand(Factory::true_value()));
-    frame_->Push(r0);
-    __ bind(&loaded);
+    frame_->EmitPush(r0);
+    loaded.Bind();
     cc_reg_ = al;
   }
 
   if (true_target.is_linked() || false_target.is_linked()) {
-    // we have at least one condition value
-    // that has been "translated" into a branch,
-    // thus it needs to be loaded explicitly again
-    Label loaded;
-    __ b(&loaded);  // don't lose current TOS
+    // We have at least one condition value that has been "translated"
+    // into a branch, thus it needs to be loaded explicitly.
+    JumpTarget loaded(this);
+    if (frame_ != NULL) {
+      loaded.Jump();  // Don't lose the current TOS.
+    }
     bool both = true_target.is_linked() && false_target.is_linked();
-    // reincarnate "true", if necessary
+    // Load "true" if necessary.
     if (true_target.is_linked()) {
-      __ bind(&true_target);
+      true_target.Bind();
       __ mov(r0, Operand(Factory::true_value()));
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     }
-    // if both "true" and "false" need to be reincarnated,
-    // jump across code for "false"
-    if (both)
-      __ b(&loaded);
-    // reincarnate "false", if necessary
+    // If both "true" and "false" need to be loaded jump across the code for
+    // "false".
+    if (both) {
+      loaded.Jump();
+    }
+    // Load "false" if necessary.
     if (false_target.is_linked()) {
-      __ bind(&false_target);
+      false_target.Bind();
       __ mov(r0, Operand(Factory::false_value()));
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     }
-    // everything is loaded at this point
-    __ bind(&loaded);
+    // A value is loaded on all paths reaching this point.
+    loaded.Bind();
   }
+  ASSERT(has_valid_frame());
   ASSERT(!has_cc());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::LoadGlobal() {
+  VirtualFrame::SpilledScope spilled_scope(this);
   __ ldr(r0, GlobalObject());
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ ldr(scratch,
          FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(scratch);
+  frame_->EmitPush(scratch);
 }
 
 
@@ -524,6 +499,7 @@
 // that we have the INSIDE_TYPEOF typeof state. => Need to handle global
 // variables w/o reference errors elsewhere.
 void CodeGenerator::LoadTypeofExpression(Expression* x) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   Variable* variable = x->AsVariableProxy()->AsVariable();
   if (variable != NULL && !variable->is_this() && variable->is_global()) {
     // NOTE: This is somewhat nasty. We force the compiler to load
@@ -534,9 +510,9 @@
     // TODO(1241834): Fetch the position from the variable instead of using
     // no position.
     Property property(&global, &key, RelocInfo::kNoPosition);
-    Load(&property);
+    LoadAndSpill(&property);
   } else {
-    Load(x, INSIDE_TYPEOF);
+    LoadAndSpill(x, INSIDE_TYPEOF);
   }
 }
 
@@ -553,6 +529,7 @@
 
 
 void CodeGenerator::LoadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ LoadReference");
   Expression* e = ref->expression();
   Property* property = e->AsProperty();
@@ -561,7 +538,7 @@
   if (property != NULL) {
     // The expression is either a property or a variable proxy that rewrites
     // to a property.
-    Load(property->obj());
+    LoadAndSpill(property->obj());
     // We use a named reference if the key is a literal symbol, unless it is
     // a string that can be legally parsed as an integer.  This is because
     // otherwise we will not get into the slow case code that handles [] on
@@ -573,7 +550,7 @@
         !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
       ref->set_type(Reference::NAMED);
     } else {
-      Load(property->key());
+      LoadAndSpill(property->key());
       ref->set_type(Reference::KEYED);
     }
   } else if (var != NULL) {
@@ -588,20 +565,21 @@
     }
   } else {
     // Anything else is a runtime error.
-    Load(e);
-    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    LoadAndSpill(e);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
   }
 }
 
 
 void CodeGenerator::UnloadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
   int size = ref->size();
   if (size > 0) {
-    frame_->Pop(r0);
+    frame_->EmitPop(r0);
     frame_->Drop(size);
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
   }
 }
 
@@ -609,35 +587,36 @@
 // ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
 // register to a boolean in the condition code register. The code
 // may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(Label* true_target,
-                              Label* false_target) {
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+                              JumpTarget* false_target) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // Note: The generated code snippet does not change stack variables.
   //       Only the condition code should be set.
-  frame_->Pop(r0);
+  frame_->EmitPop(r0);
 
   // Fast case checks
 
   // Check if the value is 'false'.
   __ cmp(r0, Operand(Factory::false_value()));
-  __ b(eq, false_target);
+  false_target->Branch(eq);
 
   // Check if the value is 'true'.
   __ cmp(r0, Operand(Factory::true_value()));
-  __ b(eq, true_target);
+  true_target->Branch(eq);
 
   // Check if the value is 'undefined'.
   __ cmp(r0, Operand(Factory::undefined_value()));
-  __ b(eq, false_target);
+  false_target->Branch(eq);
 
   // Check if the value is a smi.
   __ cmp(r0, Operand(Smi::FromInt(0)));
-  __ b(eq, false_target);
+  false_target->Branch(eq);
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, true_target);
+  true_target->Branch(eq);
 
   // Slow case: call the runtime.
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kToBool, 1);
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kToBool, 1);
   // Convert the result (r0) to a condition code.
   __ cmp(r0, Operand(Factory::false_value()));
 
@@ -724,6 +703,7 @@
 
 
 void CodeGenerator::GenericBinaryOperation(Token::Value op) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // sp[0] : y
   // sp[1] : x
   // result : r0
@@ -739,29 +719,33 @@
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      frame_->Pop(r0);  // r0 : y
-      frame_->Pop(r1);  // r1 : x
+      frame_->EmitPop(r0);  // r0 : y
+      frame_->EmitPop(r1);  // r1 : x
       GenericBinaryOpStub stub(op);
-      __ CallStub(&stub);
+      frame_->CallStub(&stub, 0);
       break;
     }
 
     case Token::DIV: {
-      __ mov(r0, Operand(1));
-      __ InvokeBuiltin(Builtins::DIV, CALL_JS);
+      Result arg_count = allocator_->Allocate(r0);
+      ASSERT(arg_count.is_valid());
+      __ mov(arg_count.reg(), Operand(1));
+      frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
       break;
     }
 
     case Token::MOD: {
-      __ mov(r0, Operand(1));
-      __ InvokeBuiltin(Builtins::MOD, CALL_JS);
+      Result arg_count = allocator_->Allocate(r0);
+      ASSERT(arg_count.is_valid());
+      __ mov(arg_count.reg(), Operand(1));
+      frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
       break;
     }
 
     case Token::COMMA:
-      frame_->Pop(r0);
+      frame_->EmitPop(r0);
       // simply discard left value
-      frame_->Pop();
+      frame_->Drop();
       break;
 
     default:
@@ -772,74 +756,20 @@
 }
 
 
-class DeferredInlinedSmiOperation: public DeferredCode {
+class DeferredInlineSmiOperation: public DeferredCode {
  public:
-  DeferredInlinedSmiOperation(CodeGenerator* generator, Token::Value op,
-                              int value, bool reversed) :
-      DeferredCode(generator), op_(op), value_(value), reversed_(reversed) {
+  DeferredInlineSmiOperation(CodeGenerator* generator,
+                             Token::Value op,
+                             int value,
+                             bool reversed)
+      : DeferredCode(generator),
+        op_(op),
+        value_(value),
+        reversed_(reversed) {
     set_comment("[ DeferredInlinedSmiOperation");
   }
 
-  virtual void Generate() {
-    switch (op_) {
-      case Token::ADD: {
-        if (reversed_) {
-          // revert optimistic add
-          __ sub(r0, r0, Operand(Smi::FromInt(value_)));
-          __ mov(r1, Operand(Smi::FromInt(value_)));  // x
-        } else {
-          // revert optimistic add
-          __ sub(r1, r0, Operand(Smi::FromInt(value_)));
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        }
-        break;
-      }
-
-      case Token::SUB: {
-        if (reversed_) {
-          // revert optimistic sub
-          __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
-          __ mov(r1, Operand(Smi::FromInt(value_)));
-        } else {
-          __ add(r1, r0, Operand(Smi::FromInt(value_)));
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        }
-        break;
-      }
-
-      case Token::BIT_OR:
-      case Token::BIT_XOR:
-      case Token::BIT_AND: {
-        if (reversed_) {
-          __ mov(r1, Operand(Smi::FromInt(value_)));
-        } else {
-          __ mov(r1, Operand(r0));
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        }
-        break;
-      }
-
-      case Token::SHL:
-      case Token::SHR:
-      case Token::SAR: {
-        if (!reversed_) {
-          __ mov(r1, Operand(r0));
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        } else {
-          UNREACHABLE();  // should have been handled in SmiOperation
-        }
-        break;
-      }
-
-      default:
-        // other cases should have been handled before this point.
-        UNREACHABLE();
-        break;
-    }
-
-    GenericBinaryOpStub igostub(op_);
-    __ CallStub(&igostub);
-  }
+  virtual void Generate();
 
  private:
   Token::Value op_;
@@ -848,9 +778,80 @@
 };
 
 
+void DeferredInlineSmiOperation::Generate() {
+  enter()->Bind();
+  VirtualFrame::SpilledScope spilled_scope(generator());
+
+  switch (op_) {
+    case Token::ADD: {
+      if (reversed_) {
+        // revert optimistic add
+        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        // revert optimistic add
+        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    case Token::SUB: {
+      if (reversed_) {
+        // revert optimistic sub
+        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        __ add(r1, r0, Operand(Smi::FromInt(value_)));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (reversed_) {
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      } else {
+        __ mov(r1, Operand(r0));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      }
+      break;
+    }
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      if (!reversed_) {
+        __ mov(r1, Operand(r0));
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      } else {
+        UNREACHABLE();  // should have been handled in SmiOperation
+      }
+      break;
+    }
+
+    default:
+      // other cases should have been handled before this point.
+      UNREACHABLE();
+      break;
+  }
+
+  GenericBinaryOpStub igostub(op_);
+  Result arg0 = generator()->allocator()->Allocate(r0);
+  ASSERT(arg0.is_valid());
+  Result arg1 = generator()->allocator()->Allocate(r1);
+  ASSERT(arg1.is_valid());
+  generator()->frame()->CallStub(&igostub, &arg0, &arg1, 0);
+  exit_.Jump();
+}
+
+
 void CodeGenerator::SmiOperation(Token::Value op,
                                  Handle<Object> value,
                                  bool reversed) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a literal smi. With this optimization, the
@@ -862,35 +863,35 @@
 
   int int_value = Smi::cast(*value)->value();
 
-  Label exit;
-  frame_->Pop(r0);
+  JumpTarget exit(this);
+  frame_->EmitPop(r0);
 
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred =
-        new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed);
 
       __ add(r0, r0, Operand(value), SetCC);
-      __ b(vs, deferred->enter());
+      deferred->enter()->Branch(vs);
       __ tst(r0, Operand(kSmiTagMask));
-      __ b(ne, deferred->enter());
-      __ bind(deferred->exit());
+      deferred->enter()->Branch(ne);
+      deferred->BindExit();
       break;
     }
 
     case Token::SUB: {
       DeferredCode* deferred =
-        new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed);
 
       if (!reversed) {
         __ sub(r0, r0, Operand(value), SetCC);
       } else {
         __ rsb(r0, r0, Operand(value), SetCC);
       }
-      __ b(vs, deferred->enter());
+      deferred->enter()->Branch(vs);
       __ tst(r0, Operand(kSmiTagMask));
-      __ b(ne, deferred->enter());
-      __ bind(deferred->exit());
+      deferred->enter()->Branch(ne);
+      deferred->BindExit();
       break;
     }
 
@@ -898,16 +899,16 @@
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       DeferredCode* deferred =
-        new DeferredInlinedSmiOperation(this, op, int_value, reversed);
+        new DeferredInlineSmiOperation(this, op, int_value, reversed);
       __ tst(r0, Operand(kSmiTagMask));
-      __ b(ne, deferred->enter());
+      deferred->enter()->Branch(ne);
       switch (op) {
         case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
         case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
         case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
         default: UNREACHABLE();
       }
-      __ bind(deferred->exit());
+      deferred->BindExit();
       break;
     }
 
@@ -916,23 +917,23 @@
     case Token::SAR: {
       if (reversed) {
         __ mov(ip, Operand(value));
-        frame_->Push(ip);
-        frame_->Push(r0);
+        frame_->EmitPush(ip);
+        frame_->EmitPush(r0);
         GenericBinaryOperation(op);
 
       } else {
         int shift_value = int_value & 0x1f;  // least significant 5 bits
         DeferredCode* deferred =
-          new DeferredInlinedSmiOperation(this, op, shift_value, false);
+          new DeferredInlineSmiOperation(this, op, shift_value, false);
         __ tst(r0, Operand(kSmiTagMask));
-        __ b(ne, deferred->enter());
+        deferred->enter()->Branch(ne);
         __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
         switch (op) {
           case Token::SHL: {
             __ mov(r2, Operand(r2, LSL, shift_value));
             // check that the *unsigned* result fits in a smi
             __ add(r3, r2, Operand(0x40000000), SetCC);
-            __ b(mi, deferred->enter());
+            deferred->enter()->Branch(mi);
             break;
           }
           case Token::SHR: {
@@ -947,7 +948,7 @@
             // smi tagging these two cases can only happen with shifts
             // by 0 or 1 when handed a valid smi
             __ and_(r3, r2, Operand(0xc0000000), SetCC);
-            __ b(ne, deferred->enter());
+            deferred->enter()->Branch(ne);
             break;
           }
           case Token::SAR: {
@@ -960,30 +961,31 @@
           default: UNREACHABLE();
         }
         __ mov(r0, Operand(r2, LSL, kSmiTagSize));
-        __ bind(deferred->exit());
+        deferred->BindExit();
       }
       break;
     }
 
     default:
       if (!reversed) {
-        frame_->Push(r0);
+        frame_->EmitPush(r0);
         __ mov(r0, Operand(value));
-        frame_->Push(r0);
+        frame_->EmitPush(r0);
       } else {
         __ mov(ip, Operand(value));
-        frame_->Push(ip);
-        frame_->Push(r0);
+        frame_->EmitPush(ip);
+        frame_->EmitPush(r0);
       }
       GenericBinaryOperation(op);
       break;
   }
 
-  __ bind(&exit);
+  exit.Bind();
 }
 
 
 void CodeGenerator::Comparison(Condition cc, bool strict) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // sp[0] : y
   // sp[1] : x
   // result : cc register
@@ -991,29 +993,29 @@
   // Strict only makes sense for equality comparisons.
   ASSERT(!strict || cc == eq);
 
-  Label exit, smi;
+  JumpTarget exit(this);
+  JumpTarget smi(this);
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   if (cc == gt || cc == le) {
     cc = ReverseCondition(cc);
-    frame_->Pop(r1);
-    frame_->Pop(r0);
+    frame_->EmitPop(r1);
+    frame_->EmitPop(r0);
   } else {
-    frame_->Pop(r0);
-    frame_->Pop(r1);
+    frame_->EmitPop(r0);
+    frame_->EmitPop(r1);
   }
   __ orr(r2, r0, Operand(r1));
   __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &smi);
+  smi.Branch(eq);
 
   // Perform non-smi comparison by runtime call.
-  frame_->Push(r1);
+  frame_->EmitPush(r1);
 
   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript native;
-  int argc;
+  int arg_count = 1;
   if (cc == eq) {
     native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
-    argc = 1;
   } else {
     native = Builtins::COMPARE;
     int ncr;  // NaN compare result
@@ -1023,24 +1025,30 @@
       ASSERT(cc == gt || cc == ge);  // remaining cases
       ncr = LESS;
     }
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
+    arg_count++;
     __ mov(r0, Operand(Smi::FromInt(ncr)));
-    argc = 2;
   }
 
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
-  frame_->Push(r0);
-  __ mov(r0, Operand(argc));
-  __ InvokeBuiltin(native, CALL_JS);
-  __ cmp(r0, Operand(0));
-  __ b(&exit);
+  frame_->EmitPush(r0);
+  Result arg_count_register = allocator_->Allocate(r0);
+  ASSERT(arg_count_register.is_valid());
+  __ mov(arg_count_register.reg(), Operand(arg_count));
+  Result result = frame_->InvokeBuiltin(native,
+                                        CALL_JS,
+                                        &arg_count_register,
+                                        arg_count + 1);
+  __ cmp(result.reg(), Operand(0));
+  result.Unuse();
+  exit.Jump();
 
   // test smi equality by pointer comparison.
-  __ bind(&smi);
+  smi.Bind();
   __ cmp(r1, Operand(r0));
 
-  __ bind(&exit);
+  exit.Bind();
   cc_reg_ = cc;
 }
 
@@ -1066,64 +1074,93 @@
 // Call the function on the stack with the given arguments.
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                          int position) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // Push the arguments ("left-to-right") on the stack.
-  for (int i = 0; i < args->length(); i++) {
-    Load(args->at(i));
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
   }
 
   // Record the position for debugging purposes.
   CodeForSourcePosition(position);
 
   // Use the shared code stub to call the function.
-  CallFunctionStub call_function(args->length());
-  __ CallStub(&call_function);
+  CallFunctionStub call_function(arg_count);
+  frame_->CallStub(&call_function, arg_count + 1);
 
   // Restore context and pop function from the stack.
   __ ldr(cp, frame_->Context());
-  frame_->Pop();  // discard the TOS
+  frame_->Drop();  // discard the TOS
 }
 
 
-void CodeGenerator::Branch(bool if_true, Label* L) {
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(has_cc());
   Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  __ b(cc, L);
+  target->Branch(cc);
   cc_reg_ = al;
 }
 
 
 void CodeGenerator::CheckStack() {
+  VirtualFrame::SpilledScope spilled_scope(this);
   if (FLAG_check_stack) {
     Comment cmnt(masm_, "[ check stack");
     StackCheckStub stub;
-    __ CallStub(&stub);
+    frame_->CallStub(&stub, 0);
   }
 }
 
 
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
+  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+    VisitAndSpill(statements->at(i));
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
 void CodeGenerator::VisitBlock(Block* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Block");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
-  VisitStatements(node->statements());
-  __ bind(node->break_target());
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
+  VisitStatementsAndSpill(node->statements());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   __ mov(r0, Operand(pairs));
-  frame_->Push(r0);
-  frame_->Push(cp);
+  frame_->EmitPush(r0);
+  frame_->EmitPush(cp);
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // The result is discarded.
 }
 
 
 void CodeGenerator::VisitDeclaration(Declaration* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -1136,29 +1173,30 @@
     // during variable resolution and must have mode DYNAMIC.
     ASSERT(var->is_dynamic());
     // For now, just do a runtime call.
-    frame_->Push(cp);
+    frame_->EmitPush(cp);
     __ mov(r0, Operand(var->name()));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
     // Declaration nodes are always declared in only two modes.
     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
     __ mov(r0, Operand(Smi::FromInt(attr)));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
     // Push initial value, if any.
     // Note: For variables we must not push an initial value (such as
     // 'undefined') because we may have a (legal) redeclaration and we
     // must not destroy the current value.
     if (node->mode() == Variable::CONST) {
       __ mov(r0, Operand(Factory::the_hole_value()));
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     } else if (node->fun() != NULL) {
-      Load(node->fun());
+      LoadAndSpill(node->fun());
     } else {
       __ mov(r0, Operand(0));  // no initial value!
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     }
-    __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
+    ASSERT(frame_->height() == original_height);
     return;
   }
 
@@ -1176,168 +1214,225 @@
     {
       // Set initial value.
       Reference target(this, node->proxy());
-      Load(val);
+      LoadAndSpill(val);
       target.SetValue(NOT_CONST_INIT);
       // The reference is removed from the stack (preserving TOS) when
       // it goes out of scope.
     }
     // Get rid of the assigned value (declarations are statements).
-    frame_->Pop();
+    frame_->Drop();
   }
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   Expression* expression = node->expression();
   expression->MarkAsStatement();
-  Load(expression);
-  frame_->Pop();
+  LoadAndSpill(expression);
+  frame_->Drop();
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   // nothing to do
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitIfStatement(IfStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which
-  // parts of the if statement are present or not.
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
   bool has_then_stm = node->HasThenStatement();
   bool has_else_stm = node->HasElseStatement();
 
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  Label exit;
+  JumpTarget exit(this);
   if (has_then_stm && has_else_stm) {
     Comment cmnt(masm_, "[ IfThenElse");
-    Label then;
-    Label else_;
+    JumpTarget then(this);
+    JumpTarget else_(this);
     // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
-    Branch(false, &else_);
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &then, &else_, true);
+    if (frame_ != NULL) {
+      Branch(false, &else_);
+    }
     // then
-    __ bind(&then);
-    Visit(node->then_statement());
-    __ b(&exit);
+    if (frame_ != NULL || then.is_linked()) {
+      then.Bind();
+      VisitAndSpill(node->then_statement());
+    }
+    if (frame_ != NULL) {
+      exit.Jump();
+    }
     // else
-    __ bind(&else_);
-    Visit(node->else_statement());
+    if (else_.is_linked()) {
+      else_.Bind();
+      VisitAndSpill(node->else_statement());
+    }
 
   } else if (has_then_stm) {
     Comment cmnt(masm_, "[ IfThen");
     ASSERT(!has_else_stm);
-    Label then;
+    JumpTarget then(this);
     // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
-    Branch(false, &exit);
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &then, &exit, true);
+    if (frame_ != NULL) {
+      Branch(false, &exit);
+    }
     // then
-    __ bind(&then);
-    Visit(node->then_statement());
+    if (frame_ != NULL || then.is_linked()) {
+      then.Bind();
+      VisitAndSpill(node->then_statement());
+    }
 
   } else if (has_else_stm) {
     Comment cmnt(masm_, "[ IfElse");
     ASSERT(!has_then_stm);
-    Label else_;
+    JumpTarget else_(this);
     // if (!cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
-    Branch(true, &exit);
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &exit, &else_, true);
+    if (frame_ != NULL) {
+      Branch(true, &exit);
+    }
     // else
-    __ bind(&else_);
-    Visit(node->else_statement());
+    if (frame_ != NULL || else_.is_linked()) {
+      else_.Bind();
+      VisitAndSpill(node->else_statement());
+    }
 
   } else {
     Comment cmnt(masm_, "[ If");
     ASSERT(!has_then_stm && !has_else_stm);
     // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
-    if (has_cc()) {
-      cc_reg_ = al;
-    } else {
-      frame_->Pop();
+    LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                          &exit, &exit, false);
+    if (frame_ != NULL) {
+      if (has_cc()) {
+        cc_reg_ = al;
+      } else {
+        frame_->Drop();
+      }
     }
   }
 
   // end
-  __ bind(&exit);
-}
-
-
-void CodeGenerator::CleanStack(int num_bytes) {
-  ASSERT(num_bytes % kPointerSize == 0);
-  frame_->Drop(num_bytes / kPointerSize);
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatement(node);
-  CleanStack(break_stack_height_ - node->target()->break_stack_height());
-  __ b(node->target()->continue_target());
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
 }
 
 
 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatement(node);
-  CleanStack(break_stack_height_ - node->target()->break_stack_height());
-  __ b(node->target()->break_target());
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
 }
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ReturnStatement");
-  CodeForStatement(node);
-  Load(node->expression());
-  // Move the function result into r0.
-  frame_->Pop(r0);
 
-  __ b(&function_return_);
+  if (function_return_is_shadowed_) {
+    CodeForStatementPosition(node);
+    LoadAndSpill(node->expression());
+    frame_->EmitPop(r0);
+    function_return_.Jump();
+  } else {
+    // Load the returned value.
+    CodeForStatementPosition(node);
+    LoadAndSpill(node->expression());
+
+    // Pop the result from the frame and prepare the frame for
+    // returning thus making it easier to merge.
+    frame_->EmitPop(r0);
+    frame_->PrepareForReturn();
+
+    function_return_.Jump();
+  }
 }
 
 
 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatement(node);
-  Load(node->expression());
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
   if (node->is_catch_block()) {
-    __ CallRuntime(Runtime::kPushCatchContext, 1);
+    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
   } else {
-    __ CallRuntime(Runtime::kPushContext, 1);
+    frame_->CallRuntime(Runtime::kPushContext, 1);
   }
   if (kDebug) {
-    Label verified_true;
+    JumpTarget verified_true(this);
     __ cmp(r0, Operand(cp));
-    __ b(eq, &verified_true);
+    verified_true.Branch(eq);
     __ stop("PushContext: r0 is expected to be the same as cp");
-    __ bind(&verified_true);
+    verified_true.Bind();
   }
   // Update context local.
   __ str(cp, frame_->Context());
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   // Pop context.
   __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
   // Update context local.
   __ str(cp, frame_->Context());
+  ASSERT(frame_->height() == original_height);
 }
 
 
 int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
-    return kFastSwitchMaxOverheadFactor;
+  return kFastSwitchMaxOverheadFactor;
 }
 
 int CodeGenerator::FastCaseSwitchMinCaseCount() {
-    return kFastSwitchMinCaseCount;
+  return kFastSwitchMinCaseCount;
 }
 
 
@@ -1345,25 +1440,32 @@
     SwitchStatement* node,
     int min_index,
     int range,
-    Label* fail_label,
+    Label* default_label,
     Vector<Label*> case_targets,
     Vector<Label> case_labels) {
+  VirtualFrame::SpilledScope spilled_scope(this);
+  JumpTarget setup_default(this);
+  JumpTarget is_smi(this);
+
+  // A non-null default label pointer indicates a default case among
+  // the case labels.  Otherwise we use the break target as a
+  // "default" for failure to hit the jump table.
+  JumpTarget* default_target =
+      (default_label == NULL) ? node->break_target() : &setup_default;
 
   ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
-
-  frame_->Pop(r0);
+  frame_->EmitPop(r0);
 
   // Test for a Smi value in a HeapNumber.
-  Label is_smi;
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &is_smi);
+  is_smi.Branch(eq);
   __ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag));
   __ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag));
   __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
-  __ b(ne, fail_label);
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kNumberToSmi, 1);
-  __ bind(&is_smi);
+  default_target->Branch(ne);
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kNumberToSmi, 1);
+  is_smi.Bind();
 
   if (min_index != 0) {
     // Small positive numbers can be immediate operands.
@@ -1380,89 +1482,139 @@
     }
   }
   __ tst(r0, Operand(0x80000000 | kSmiTagMask));
-  __ b(ne, fail_label);
+  default_target->Branch(ne);
   __ cmp(r0, Operand(Smi::FromInt(range)));
-  __ b(ge, fail_label);
+  default_target->Branch(ge);
+  VirtualFrame* start_frame = new VirtualFrame(frame_);
   __ SmiJumpTable(r0, case_targets);
 
-  GenerateFastCaseSwitchCases(node, case_labels);
+  GenerateFastCaseSwitchCases(node, case_labels, start_frame);
+
+  // If there was a default case among the case labels, we need to
+  // emit code to jump to it from the default target used for failure
+  // to hit the jump table.
+  if (default_label != NULL) {
+    if (has_valid_frame()) {
+      node->break_target()->Jump();
+    }
+    setup_default.Bind();
+    frame_->MergeTo(start_frame);
+    __ b(default_label);
+    DeleteFrame();
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+
+  delete start_frame;
 }
 
 
 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
 
-  Load(node->tag());
-
+  LoadAndSpill(node->tag());
   if (TryGenerateFastCaseSwitchStatement(node)) {
-      return;
+    ASSERT(!has_valid_frame() || frame_->height() == original_height);
+    return;
   }
 
-  Label next, fall_through, default_case;
+  JumpTarget next_test(this);
+  JumpTarget fall_through(this);
+  JumpTarget default_entry(this);
+  JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
+  CaseClause* default_clause = NULL;
 
   for (int i = 0; i < length; i++) {
     CaseClause* clause = cases->at(i);
-
-    Comment cmnt(masm_, "[ case clause");
-
     if (clause->is_default()) {
-      // Continue matching cases. The program will execute the default case's
-      // statements if it does not match any of the cases.
-      __ b(&next);
-
-      // Bind the default case label, so we can branch to it when we
-      // have compared against all other cases.
-      ASSERT(default_case.is_unused());  // at most one default clause
-      __ bind(&default_case);
-    } else {
-      __ bind(&next);
-      next.Unuse();
-      __ ldr(r0, frame_->Top());
-      frame_->Push(r0);  // duplicate TOS
-      Load(clause->label());
-      Comparison(eq, true);
-      Branch(false, &next);
+      // Remember the default clause and compile it at the end.
+      default_clause = clause;
+      continue;
     }
 
-    // Entering the case statement for the first time. Remove the switch value
-    // from the stack.
-    frame_->Pop();
+    Comment cmnt(masm_, "[ Case clause");
+    // Compile the test.
+    next_test.Bind();
+    next_test.Unuse();
+    // Duplicate TOS.
+    __ ldr(r0, frame_->Top());
+    frame_->EmitPush(r0);
+    LoadAndSpill(clause->label());
+    Comparison(eq, true);
+    Branch(false, &next_test);
 
-    // Generate code for the body.
-    // This is also the target for the fall through from the previous case's
-    // statements which has to skip over the matching code and the popping of
-    // the switch value.
-    __ bind(&fall_through);
-    fall_through.Unuse();
-    VisitStatements(clause->statements());
-    __ b(&fall_through);
+    // Before entering the body from the test, remove the switch value from
+    // the stack.
+    frame_->Drop();
+
+    // Label the body so that fall through is enabled.
+    if (i > 0 && cases->at(i - 1)->is_default()) {
+      default_exit.Bind();
+    } else {
+      fall_through.Bind();
+      fall_through.Unuse();
+    }
+    VisitStatementsAndSpill(clause->statements());
+
+    // If control flow can fall through from the body, jump to the next body
+    // or the end of the statement.
+    if (frame_ != NULL) {
+      if (i < length - 1 && cases->at(i + 1)->is_default()) {
+        default_entry.Jump();
+      } else {
+        fall_through.Jump();
+      }
+    }
   }
 
-  __ bind(&next);
-  // Reached the end of the case statements without matching any of the cases.
-  if (default_case.is_bound()) {
-    // A default case exists -> execute its statements.
-    __ b(&default_case);
-  } else {
-    // Remove the switch value from the stack.
-    frame_->Pop();
+  // The final "test" removes the switch value.
+  next_test.Bind();
+  frame_->Drop();
+
+  // If there is a default clause, compile it.
+  if (default_clause != NULL) {
+    Comment cmnt(masm_, "[ Default clause");
+    default_entry.Bind();
+    VisitStatementsAndSpill(default_clause->statements());
+    // If control flow can fall out of the default and there is a case after
+    // it, jump to that case's body.
+    if (frame_ != NULL && default_exit.is_bound()) {
+      default_exit.Jump();
+    }
   }
 
-  __ bind(&fall_through);
-  __ bind(node->break_target());
+  if (fall_through.is_linked()) {
+    fall_through.Bind();
+  }
+
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ LoopStatement");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
 
-  // simple condition analysis
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
   enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
   if (node->cond() == NULL) {
     ASSERT(node->type() == LoopStatement::FOR_LOOP);
@@ -1478,81 +1630,195 @@
     }
   }
 
-  Label loop, entry;
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
 
-  // init
-  if (node->init() != NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    Visit(node->init());
-  }
-  if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
-    __ b(&entry);
-  }
+      // Label the top of the loop for the backward CFG edge.  If the test
+      // is always true we can use the continue target, and if the test is
+      // always false there is no need.
+      if (info == ALWAYS_TRUE) {
+        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        node->continue_target()->Initialize(this);
+      } else {
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->Initialize(this);
+        body.Bind();
+      }
 
-  // body
-  __ bind(&loop);
-  Visit(node->body());
-
-  // next
-  __ bind(node->continue_target());
-  if (node->next() != NULL) {
-    // Record source position of the statement as this code which is after the
-    // code for the body actually belongs to the loop statement and not the
-    // body.
-    CodeForStatement(node);
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    Visit(node->next());
-  }
-
-  // cond
-  __ bind(&entry);
-  switch (info) {
-    case ALWAYS_TRUE:
       CheckStack();  // TODO(1222600): ignore if body contains calls.
-      __ b(&loop);
+      VisitAndSpill(node->body());
+
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          // If control can fall off the end of the body, jump back to the
+          // top.
+          node->continue_target()->Jump();
+        }
+      } else if (info == ALWAYS_FALSE) {
+        // If we have a continue in the body, we only have to bind its jump
+        // target.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                                &body, node->break_target(), true);
+          if (has_valid_frame()) {
+            // An invalid frame here indicates that control did not
+            // fall out of the test expression.
+            Branch(true, &body);
+          }
+        }
+      }
       break;
-    case ALWAYS_FALSE:
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // If the test is never true and has no side effects there is no need
+      // to compile the test or body.
+      if (info == ALWAYS_FALSE) break;
+
+      // Label the top of the loop with the continue target for the backward
+      // CFG edge.
+      node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+
+      if (info == DONT_KNOW) {
+        JumpTarget body(this);
+        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                              &body, node->break_target(), true);
+        if (has_valid_frame()) {
+          // A NULL frame indicates that control did not fall out of the
+          // test expression.
+          Branch(false, node->break_target());
+        }
+        if (has_valid_frame() || body.is_linked()) {
+          body.Bind();
+        }
+      }
+
+      if (has_valid_frame()) {
+        CheckStack();  // TODO(1222600): ignore if body contains calls.
+        VisitAndSpill(node->body());
+
+        // If control flow can fall out of the body, jump back to the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      }
       break;
-    case DONT_KNOW:
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      LoadCondition(node->cond(),
-                    NOT_INSIDE_TYPEOF,
-                    &loop,
-                    node->break_target(),
-                    true);
-      Branch(true, &loop);
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+
+      if (node->init() != NULL) {
+        VisitAndSpill(node->init());
+      }
+
+      // There is no need to compile the test or body.
+      if (info == ALWAYS_FALSE) break;
+
+      // If there is no update statement, label the top of the loop with the
+      // continue target, otherwise with the loop target.
+      if (node->next() == NULL) {
+        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        node->continue_target()->Initialize(this);
+        loop.Bind();
+      }
+
+      // If the test is always true, there is no need to compile it.
+      if (info == DONT_KNOW) {
+        JumpTarget body(this);
+        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                              &body, node->break_target(), true);
+        if (has_valid_frame()) {
+          Branch(false, node->break_target());
+        }
+        if (has_valid_frame() || body.is_linked()) {
+          body.Bind();
+        }
+      }
+
+      if (has_valid_frame()) {
+        CheckStack();  // TODO(1222600): ignore if body contains calls.
+        VisitAndSpill(node->body());
+
+        if (node->next() == NULL) {
+          // If there is no update statement and control flow can fall out
+          // of the loop, jump directly to the continue label.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        } else {
+          // If there is an update statement and control flow can reach it
+          // via falling out of the body of the loop or continuing, we
+          // compile the update statement.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // Record source position of the statement as this code which is
+            // after the code for the body actually belongs to the loop
+            // statement and not the body.
+            CodeForStatementPosition(node);
+            VisitAndSpill(node->next());
+            loop.Jump();
+          }
+        }
+      }
       break;
+    }
   }
 
-  // exit
-  __ bind(node->break_target());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  // We keep stuff on the stack while the body is executing.
-  // Record it, so that a break/continue crossing this statement
-  // can restore the stack.
-  const int kForInStackSize = 5 * kPointerSize;
-  break_stack_height_ += kForInStackSize;
-  node->set_break_stack_height(break_stack_height_);
-
-  Label loop, next, entry, cleanup, exit, primitive, jsobject;
-  Label filter_key, end_del_check, fixed_array, non_string;
+  JumpTarget primitive(this);
+  JumpTarget jsobject(this);
+  JumpTarget fixed_array(this);
+  JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check(this);
+  JumpTarget exit(this);
 
   // Get the object to enumerate over (converted to JSObject).
-  Load(node->enumerable());
-  frame_->Pop(r0);
+  LoadAndSpill(node->enumerable());
 
   // Both SpiderMonkey and kjs ignore null and undefined in contrast
   // to the specification.  12.6.4 mandates a call to ToObject.
+  frame_->EmitPop(r0);
   __ cmp(r0, Operand(Factory::undefined_value()));
-  __ b(eq, &exit);
+  exit.Branch(eq);
   __ cmp(r0, Operand(Factory::null_value()));
-  __ b(eq, &exit);
+  exit.Branch(eq);
 
   // Stack layout in body:
   // [iteration counter (Smi)]
@@ -1563,31 +1829,31 @@
 
   // Check if enumerable is already a JSObject
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &primitive);
+  primitive.Branch(eq);
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
-  __ b(hs, &jsobject);
+  jsobject.Branch(hs);
 
-  __ bind(&primitive);
-  frame_->Push(r0);
-  __ mov(r0, Operand(0));
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  primitive.Bind();
+  frame_->EmitPush(r0);
+  Result arg_count = allocator_->Allocate(r0);
+  ASSERT(arg_count.is_valid());
+  __ mov(arg_count.reg(), Operand(0));
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
 
-
-  __ bind(&jsobject);
-
+  jsobject.Bind();
   // Get the set of properties (as a FixedArray or Map).
-  frame_->Push(r0);  // duplicate the object being enumerated
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+  frame_->EmitPush(r0);  // duplicate the object being enumerated
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
 
   // If we got a Map, we can do a fast modification check.
   // Otherwise, we got a FixedArray, and we have to do a slow check.
   __ mov(r2, Operand(r0));
   __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
   __ cmp(r1, Operand(Factory::meta_map()));
-  __ b(ne, &fixed_array);
+  fixed_array.Branch(ne);
 
   // Get enum cache
   __ mov(r1, Operand(r0));
@@ -1596,94 +1862,87 @@
   __ ldr(r2,
          FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  frame_->Push(r0);  // map
-  frame_->Push(r2);  // enum cache bridge cache
+  frame_->EmitPush(r0);  // map
+  frame_->EmitPush(r2);  // enum cache bridge cache
   __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
   __ mov(r0, Operand(Smi::FromInt(0)));
-  frame_->Push(r0);
-  __ b(&entry);
+  frame_->EmitPush(r0);
+  entry.Jump();
 
-
-  __ bind(&fixed_array);
-
+  fixed_array.Bind();
   __ mov(r1, Operand(Smi::FromInt(0)));
-  frame_->Push(r1);  // insert 0 in place of Map
-  frame_->Push(r0);
+  frame_->EmitPush(r1);  // insert 0 in place of Map
+  frame_->EmitPush(r0);
 
   // Push the length of the array and the initial index onto the stack.
   __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
   __ mov(r0, Operand(Smi::FromInt(0)));  // init index
-  frame_->Push(r0);
-
-  __ b(&entry);
-
-  // Body.
-  __ bind(&loop);
-  Visit(node->body());
-
-  // Next.
-  __ bind(node->continue_target());
-  __ bind(&next);
-  frame_->Pop(r0);
-  __ add(r0, r0, Operand(Smi::FromInt(1)));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 
   // Condition.
-  __ bind(&entry);
-
+  entry.Bind();
   // sp[0] : index
   // sp[1] : array/enum cache length
   // sp[2] : array or enum cache
   // sp[3] : 0 or map
   // sp[4] : enumerable
-  __ ldr(r0, frame_->Element(0));  // load the current count
-  __ ldr(r1, frame_->Element(1));  // load the length
-  __ cmp(r0, Operand(r1));  // compare to the array length
-  __ b(hs, &cleanup);
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->Initialize(this);
+  node->continue_target()->Initialize(this);
 
-  __ ldr(r0, frame_->Element(0));
+  __ ldr(r0, frame_->ElementAt(0));  // load the current count
+  __ ldr(r1, frame_->ElementAt(1));  // load the length
+  __ cmp(r0, Operand(r1));  // compare to the array length
+  node->break_target()->Branch(hs);
+
+  __ ldr(r0, frame_->ElementAt(0));
 
   // Get the i'th entry of the array.
-  __ ldr(r2, frame_->Element(2));
+  __ ldr(r2, frame_->ElementAt(2));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
 
   // Get Map or 0.
-  __ ldr(r2, frame_->Element(3));
+  __ ldr(r2, frame_->ElementAt(3));
   // Check if this (still) matches the map of the enumerable.
   // If not, we have to filter the key.
-  __ ldr(r1, frame_->Element(4));
+  __ ldr(r1, frame_->ElementAt(4));
   __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r1, Operand(r2));
-  __ b(eq, &end_del_check);
+  end_del_check.Branch(eq);
 
   // Convert the entry to a string (or null if it isn't a property anymore).
-  __ ldr(r0, frame_->Element(4));  // push enumerable
-  frame_->Push(r0);
-  frame_->Push(r3);  // push entry
-  __ mov(r0, Operand(1));
-  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
-  __ mov(r3, Operand(r0));
+  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(r0);
+  frame_->EmitPush(r3);  // push entry
+  Result arg_count_register = allocator_->Allocate(r0);
+  ASSERT(arg_count_register.is_valid());
+  __ mov(arg_count_register.reg(), Operand(1));
+  Result result = frame_->InvokeBuiltin(Builtins::FILTER_KEY,
+                                        CALL_JS,
+                                        &arg_count_register,
+                                        2);
+  __ mov(r3, Operand(result.reg()));
+  result.Unuse();
 
   // If the property has been removed while iterating, we just skip it.
   __ cmp(r3, Operand(Factory::null_value()));
-  __ b(eq, &next);
+  node->continue_target()->Branch(eq);
 
-
-  __ bind(&end_del_check);
-
-  // Store the entry in the 'each' expression and take another spin in the loop.
-  // r3: i'th entry of the enum cache (or string there of)
-  frame_->Push(r3);  // push entry
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  r3: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(r3);  // push entry
   { Reference each(this, node->each());
     if (!each.is_illegal()) {
       if (each.size() > 0) {
-        __ ldr(r0, frame_->Element(each.size()));
-        frame_->Push(r0);
+        __ ldr(r0, frame_->ElementAt(each.size()));
+        frame_->EmitPush(r0);
       }
       // If the reference was to a slot we rely on the convenient property
       // that it doesn't matter whether a value (eg, r3 pushed above) is
@@ -1695,37 +1954,51 @@
         // ie, now the topmost value of the non-zero sized reference), since
         // we will discard the top of stack after unloading the reference
         // anyway.
-        frame_->Pop(r0);
+        frame_->EmitPop(r0);
       }
     }
   }
   // Discard the i'th entry pushed above or else the remainder of the
   // reference, whichever is currently on top of the stack.
-  frame_->Pop();
+  frame_->Drop();
+
+  // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
-  __ jmp(&loop);
+  VisitAndSpill(node->body());
+
+  // Next.
+  node->continue_target()->Bind();
+  frame_->EmitPop(r0);
+  __ add(r0, r0, Operand(Smi::FromInt(1)));
+  frame_->EmitPush(r0);
+  entry.Jump();
 
   // Cleanup.
-  __ bind(&cleanup);
-  __ bind(node->break_target());
+  node->break_target()->Bind();
   frame_->Drop(5);
 
   // Exit.
-  __ bind(&exit);
-
-  break_stack_height_ -= kForInStackSize;
+  exit.Bind();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitTryCatch(TryCatch* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ TryCatch");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  Label try_block, exit;
+  JumpTarget try_block(this);
+  JumpTarget exit(this);
 
-  __ bl(&try_block);
+  try_block.Call();
   // --- Catch block ---
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 
   // Store the caught exception in the catch variable.
   { Reference ref(this, node->catch_var());
@@ -1737,16 +2010,19 @@
   }
 
   // Remove the exception from the stack.
-  frame_->Pop();
+  frame_->Drop();
 
-  VisitStatements(node->catch_block()->statements());
-  __ b(&exit);
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (frame_ != NULL) {
+    exit.Jump();
+  }
 
 
   // --- Try block ---
-  __ bind(&try_block);
+  try_block.Bind();
 
-  __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
 
   // Shadow the labels for all escapes from the try block, including
   // returns. During shadowing, the original label is hidden as the
@@ -1755,87 +2031,116 @@
   //
   // We should probably try to unify the escaping labels and the return
   // label.
-  int nof_escapes = node->escaping_labels()->length();
-  List<LabelShadow*> shadows(1 + nof_escapes);
-  shadows.Add(new LabelShadow(&function_return_));
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
   for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   }
 
   // Generate code for the statements in the try block.
-  VisitStatements(node->try_block()->statements());
-  // Discard the code slot from the handler.
-  frame_->Pop();
+  VisitStatementsAndSpill(node->try_block()->statements());
 
   // Stop the introduced shadowing and count the number of required unlinks.
   // After shadowing stops, the original labels are unshadowed and the
   // LabelShadows represent the formerly shadowing labels.
-  int nof_unlinks = 0;
-  for (int i = 0; i <= nof_escapes; i++) {
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
     shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
+  }
+  function_return_is_shadowed_ = function_return_was_shadowed;
+
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
+
+  // The next handler address is at kNextIndex in the stack.
+  const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    __ ldr(r1, frame_->ElementAt(kNextIndex));
+    __ mov(r3, Operand(handler_address));
+    __ str(r1, MemOperand(r3));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+    if (has_unlinks) {
+      exit.Jump();
+    }
   }
 
-  // Unlink from try chain.
-  // The code slot has already been discarded, so the next index is
-  // adjusted by 1.
-  const int kNextIndex =
-      (StackHandlerConstants::kNextOffset / kPointerSize) - 1;
-  __ ldr(r1, frame_->Element(kNextIndex));  // read next_sp
-  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
-  __ str(r1, MemOperand(r3));
-  // The code slot has already been dropped from the handler.
-  frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-  if (nof_unlinks > 0) __ b(&exit);
-
   // Generate unlink code for the (formerly) shadowing labels that have been
-  // jumped to.
-  for (int i = 0; i <= nof_escapes; i++) {
+  // jumped to.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
     if (shadows[i]->is_linked()) {
       // Unlink from try chain;
-      __ bind(shadows[i]);
+      shadows[i]->Bind();
+      // Because we can be jumping here (to spilled code) from unspilled
+      // code, we need to reestablish a spilled frame at this block.
+      frame_->SpillAll();
 
       // Reload sp from the top handler, because some statements that we
       // break from (eg, for...in) may have left stuff on the stack.
-      __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+      __ mov(r3, Operand(handler_address));
       __ ldr(sp, MemOperand(r3));
+      // The stack pointer was restored to just below the code slot
+      // (the topmost slot) in the handler.
+      frame_->Forget(frame_->height() - handler_height + 1);
 
-      __ ldr(r1, frame_->Element(kNextIndex));
+      // kNextIndex is off by one because the code slot has already
+      // been dropped.
+      __ ldr(r1, frame_->ElementAt(kNextIndex - 1));
       __ str(r1, MemOperand(r3));
       // The code slot has already been dropped from the handler.
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
-      __ b(shadows[i]->original_label());
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        frame_->PrepareForReturn();
+      }
+      shadows[i]->other_target()->Jump();
     }
+    delete shadows[i];
   }
 
-  __ bind(&exit);
+  exit.Bind();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitTryFinally(TryFinally* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ TryFinally");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
   // block. Should probably be extended to hold information for
   // break/continue from within the try block.
   enum { FALLING, THROWING, JUMPING };
 
-  Label exit, unlink, try_block, finally_block;
+  JumpTarget try_block(this);
+  JumpTarget finally_block(this);
 
-  __ bl(&try_block);
+  try_block.Call();
 
-  frame_->Push(r0);  // save exception object on the stack
+  frame_->EmitPush(r0);  // save exception object on the stack
   // In case of thrown exceptions, this is where we continue.
   __ mov(r2, Operand(Smi::FromInt(THROWING)));
-  __ b(&finally_block);
-
+  finally_block.Jump();
 
   // --- Try block ---
-  __ bind(&try_block);
+  try_block.Bind();
 
-  __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
 
   // Shadow the labels for all escapes from the try block, including
   // returns.  Shadowing hides the original label as the LabelShadow and
@@ -1843,173 +2148,247 @@
   //
   // We should probably try to unify the escaping labels and the return
   // label.
-  int nof_escapes = node->escaping_labels()->length();
-  List<LabelShadow*> shadows(1 + nof_escapes);
-  shadows.Add(new LabelShadow(&function_return_));
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
   for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   }
 
   // Generate code for the statements in the try block.
-  VisitStatements(node->try_block()->statements());
+  VisitStatementsAndSpill(node->try_block()->statements());
 
   // Stop the introduced shadowing and count the number of required unlinks.
   // After shadowing stops, the original labels are unshadowed and the
   // LabelShadows represent the formerly shadowing labels.
   int nof_unlinks = 0;
-  for (int i = 0; i <= nof_escapes; i++) {
+  for (int i = 0; i < shadows.length(); i++) {
     shadows[i]->StopShadowing();
     if (shadows[i]->is_linked()) nof_unlinks++;
   }
+  function_return_is_shadowed_ = function_return_was_shadowed;
 
-  // Set the state on the stack to FALLING.
-  __ mov(r0, Operand(Factory::undefined_value()));  // fake TOS
-  frame_->Push(r0);
-  __ mov(r2, Operand(Smi::FromInt(FALLING)));
-  if (nof_unlinks > 0) __ b(&unlink);
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
 
-  // Generate code to set the state for the (formerly) shadowing labels that
-  // have been jumped to.
-  for (int i = 0; i <= nof_escapes; i++) {
+  // The next handler address is at kNextIndex in the stack.
+  const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    __ ldr(r1, frame_->ElementAt(kNextIndex));
+    __ mov(r3, Operand(handler_address));
+    __ str(r1, MemOperand(r3));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in r2, then jump around the unlink blocks if any.
+    __ mov(r0, Operand(Factory::undefined_value()));
+    frame_->EmitPush(r0);
+    __ mov(r2, Operand(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
     if (shadows[i]->is_linked()) {
-      __ bind(shadows[i]);
-      if (shadows[i]->original_label() == &function_return_) {
+      // If we have come from the shadowed return, the return value is
+      // in (a non-refcounted reference to) r0.  We must preserve it
+      // until it is pushed.
+      //
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      shadows[i]->Bind();
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (eg, for...in) may have left stuff on the
+      // stack.
+      __ mov(r3, Operand(handler_address));
+      __ ldr(sp, MemOperand(r3));
+      // The stack pointer was restored to the address slot in the handler.
+      ASSERT(StackHandlerConstants::kNextOffset == 1 * kPointerSize);
+      frame_->Forget(frame_->height() - handler_height + 1);
+
+      // Unlink this handler and drop it from the frame.  The next
+      // handler address is now on top of the frame.
+      frame_->EmitPop(r1);
+      __ str(r1, MemOperand(r3));
+      // The top (code) and the second (handler) slot have both been
+      // dropped already.
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 2);
+
+      if (i == kReturnShadowIndex) {
         // If this label shadowed the function return, materialize the
         // return value on the stack.
-        frame_->Push(r0);
+        frame_->EmitPush(r0);
       } else {
-        // Fake TOS for labels that shadowed breaks and continues.
+        // Fake TOS for targets that shadowed breaks and continues.
         __ mov(r0, Operand(Factory::undefined_value()));
-        frame_->Push(r0);
+        frame_->EmitPush(r0);
       }
       __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
-      __ b(&unlink);
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
     }
   }
 
-  // Unlink from try chain;
-  __ bind(&unlink);
-
-  frame_->Pop(r0);  // Preserve TOS result in r0 across stack manipulation.
-  // Reload sp from the top handler, because some statements that we
-  // break from (eg, for...in) may have left stuff on the stack.
-  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
-  __ ldr(sp, MemOperand(r3));
-  const int kNextIndex = (StackHandlerConstants::kNextOffset
-                          + StackHandlerConstants::kAddressDisplacement)
-                       / kPointerSize;
-  __ ldr(r1, frame_->Element(kNextIndex));
-  __ str(r1, MemOperand(r3));
-  ASSERT(StackHandlerConstants::kCodeOffset == 0);  // first field is code
-  // The stack pointer was restored to just below the code slot (the
-  // topmost slot) of the handler, so all but the code slot need to be
-  // dropped.
-  frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-  // Restore result to TOS.
-  frame_->Push(r0);
-
   // --- Finally block ---
-  __ bind(&finally_block);
+  finally_block.Bind();
 
   // Push the state on the stack.
-  frame_->Push(r2);
+  frame_->EmitPush(r2);
 
   // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block. Record it, so
-  // that a break/continue crossing this statement can restore the
-  // stack.
-  const int kFinallyStackSize = 2 * kPointerSize;
-  break_stack_height_ += kFinallyStackSize;
-
+  // and the state - while evaluating the finally block.
+  //
   // Generate code for the statements in the finally block.
-  VisitStatements(node->finally_block()->statements());
+  VisitStatementsAndSpill(node->finally_block()->statements());
 
-  // Restore state and return value or faked TOS.
-  frame_->Pop(r2);
-  frame_->Pop(r0);
-  break_stack_height_ -= kFinallyStackSize;
-
-  // Generate code to jump to the right destination for all used (formerly)
-  // shadowing labels.
-  for (int i = 0; i <= nof_escapes; i++) {
-    if (shadows[i]->is_bound()) {
-      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
-      __ b(eq, shadows[i]->original_label());
-    }
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(r2);
+    frame_->EmitPop(r0);
   }
 
-  // Check if we need to rethrow the exception.
-  __ cmp(r2, Operand(Smi::FromInt(THROWING)));
-  __ b(ne, &exit);
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      JumpTarget* original = shadows[i]->other_target();
+      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        JumpTarget skip(this);
+        skip.Branch(ne);
+        frame_->PrepareForReturn();
+        original->Jump();
+        skip.Bind();
+      } else {
+        original->Branch(eq);
+      }
+    }
+    delete shadows[i];
+  }
 
-  // Rethrow exception.
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kReThrow, 1);
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit(this);
+    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
+    exit.Branch(ne);
 
-  // Done.
-  __ bind(&exit);
+    // Rethrow exception.
+    frame_->EmitPush(r0);
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
 
 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ DebuggerStatament");
-  CodeForStatement(node);
-  __ CallRuntime(Runtime::kDebugBreak, 0);
+  CodeForStatementPosition(node);
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
   // Ignore the return value.
+  ASSERT(frame_->height() == original_height);
 }
 
 
 void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(boilerplate->IsBoilerplate());
 
   // Push the boilerplate on the stack.
   __ mov(r0, Operand(boilerplate));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 
   // Create a new closure.
-  frame_->Push(cp);
-  __ CallRuntime(Runtime::kNewClosure, 2);
-  frame_->Push(r0);
+  frame_->EmitPush(cp);
+  frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate = BuildBoilerplate(node);
   // Check for stack-overflow exception.
-  if (HasStackOverflow()) return;
+  if (HasStackOverflow()) {
+    ASSERT(frame_->height() == original_height);
+    return;
+  }
   InstantiateBoilerplate(boilerplate);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
   InstantiateBoilerplate(node->boilerplate());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitConditional(Conditional* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Conditional");
-  Label then, else_, exit;
-  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
+  JumpTarget then(this);
+  JumpTarget else_(this);
+  JumpTarget exit(this);
+  LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+                        &then, &else_, true);
   Branch(false, &else_);
-  __ bind(&then);
-  Load(node->then_expression(), typeof_state());
-  __ b(&exit);
-  __ bind(&else_);
-  Load(node->else_expression(), typeof_state());
-  __ bind(&exit);
+  then.Bind();
+  LoadAndSpill(node->then_expression(), typeof_state());
+  exit.Jump();
+  else_.Bind();
+  LoadAndSpill(node->else_expression(), typeof_state());
+  exit.Bind();
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
 
-    Label slow, done;
+    JumpTarget slow(this);
+    JumpTarget done(this);
 
     // Generate fast-case code for variables that might be shadowed by
     // eval-introduced variables.  Eval is used a lot without
@@ -2018,7 +2397,13 @@
     // containing the eval.
     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
       LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
-      __ b(&done);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->EmitPush(r0);
+        return;
+      }
+
+      done.Jump();
 
     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
@@ -2030,23 +2415,25 @@
                                                  r1,
                                                  r2,
                                                  &slow));
-        __ b(&done);
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions.
+        done.Jump();
       }
     }
 
-    __ bind(&slow);
-    frame_->Push(cp);
+    slow.Bind();
+    frame_->EmitPush(cp);
     __ mov(r0, Operand(slot->var()->name()));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
 
     if (typeof_state == INSIDE_TYPEOF) {
-      __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    __ bind(&done);
-    frame_->Push(r0);
+    done.Bind();
+    frame_->EmitPush(r0);
 
   } else {
     // Note: We would like to keep the assert below, but it fires because of
@@ -2055,16 +2442,16 @@
 
     // Special handling for locals allocated in registers.
     __ ldr(r0, SlotOperand(slot, r2));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
     if (slot->var()->mode() == Variable::CONST) {
       // Const slots may contain 'the hole' value (the constant hasn't been
       // initialized yet) which needs to be converted into the 'undefined'
       // value.
       Comment cmnt(masm_, "[ Unhole const");
-      frame_->Pop(r0);
+      frame_->EmitPop(r0);
       __ cmp(r0, Operand(Factory::the_hole_value()));
       __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     }
   }
 }
@@ -2074,7 +2461,7 @@
                                                       TypeofState typeof_state,
                                                       Register tmp,
                                                       Register tmp2,
-                                                      Label* slow) {
+                                                      JumpTarget* slow) {
   // Check that no extension objects have been created by calls to
   // eval from the current scope to the global scope.
   Register context = cp;
@@ -2085,7 +2472,7 @@
         // Check that extension is NULL.
         __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(tmp2, tmp2);
-        __ b(ne, slow);
+        slow->Branch(ne);
       }
       // Load next context in chain.
       __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
@@ -2109,7 +2496,7 @@
     // Check that extension is NULL.
     __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
     __ tst(tmp2, tmp2);
-    __ b(ne, slow);
+    slow->Branch(ne);
     // Load next context in chain.
     __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
     __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
@@ -2123,26 +2510,37 @@
   // Load the global object.
   LoadGlobal();
   // Setup the name register.
-  __ mov(r2, Operand(slot->var()->name()));
+  Result name = allocator_->Allocate(r2);
+  ASSERT(name.is_valid());  // We are in spilled code.
+  __ mov(name.reg(), Operand(slot->var()->name()));
   // Call IC stub.
   if (typeof_state == INSIDE_TYPEOF) {
-    __ Call(ic, RelocInfo::CODE_TARGET);
+    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
   } else {
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
   }
 
-  // Pop the global object. The result is in r0.
-  frame_->Pop();
+  // Drop the global object. The result is in r0.
+  frame_->Drop();
 }
 
 
 void CodeGenerator::VisitSlot(Slot* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Slot");
   LoadFromSlot(node, typeof_state());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ VariableProxy");
 
   Variable* var = node->var();
@@ -2152,19 +2550,29 @@
   } else {
     ASSERT(var->is_global());
     Reference ref(this, node);
-    ref.GetValue(typeof_state());
+    ref.GetValueAndSpill(typeof_state());
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitLiteral(Literal* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Literal");
   __ mov(r0, Operand(node->handle()));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ RexExp Literal");
 
   // Retrieve the literal array and check the allocated entry.
@@ -2180,65 +2588,80 @@
       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
   __ ldr(r2, FieldMemOperand(r1, literal_offset));
 
-  Label done;
+  JumpTarget done(this);
   __ cmp(r2, Operand(Factory::undefined_value()));
-  __ b(ne, &done);
+  done.Branch(ne);
 
   // If the entry is undefined we call the runtime system to computed
   // the literal.
-  frame_->Push(r1);  // literal array  (0)
+  frame_->EmitPush(r1);  // literal array  (0)
   __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
-  frame_->Push(r0);  // literal index  (1)
+  frame_->EmitPush(r0);  // literal index  (1)
   __ mov(r0, Operand(node->pattern()));  // RegExp pattern (2)
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
   __ mov(r0, Operand(node->flags()));  // RegExp flags   (3)
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  frame_->EmitPush(r0);
+  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ mov(r2, Operand(r0));
 
-  __ bind(&done);
+  done.Bind();
   // Push the literal.
-  frame_->Push(r2);
+  frame_->EmitPush(r2);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 // This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateObjectLiteral.
+// by calling Runtime_CreateObjectLiteralBoilerplate.
 // Each created boilerplate is stored in the JSFunction and they are
 // therefore context dependent.
-class ObjectLiteralDeferred: public DeferredCode {
+class DeferredObjectLiteral: public DeferredCode {
  public:
-  ObjectLiteralDeferred(CodeGenerator* generator, ObjectLiteral* node)
+  DeferredObjectLiteral(CodeGenerator* generator, ObjectLiteral* node)
       : DeferredCode(generator), node_(node) {
-    set_comment("[ ObjectLiteralDeferred");
+    set_comment("[ DeferredObjectLiteral");
   }
+
   virtual void Generate();
+
  private:
   ObjectLiteral* node_;
 };
 
 
-void ObjectLiteralDeferred::Generate() {
+void DeferredObjectLiteral::Generate() {
+  // Argument is passed in r1.
+  enter()->Bind();
+  VirtualFrame::SpilledScope spilled_scope(generator());
+
   // If the entry is undefined we call the runtime system to computed
   // the literal.
 
+  VirtualFrame* frame = generator()->frame();
   // Literal array (0).
-  __ push(r1);
+  frame->EmitPush(r1);
   // Literal index (1).
   __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
-  __ push(r0);
+  frame->EmitPush(r0);
   // Constant properties (2).
   __ mov(r0, Operand(node_->constant_properties()));
-  __ push(r0);
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ mov(r2, Operand(r0));
+  frame->EmitPush(r0);
+  Result boilerplate =
+      frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ mov(r2, Operand(boilerplate.reg()));
+  // Result is returned in r2.
+  exit_.Jump();
 }
 
 
 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
+  DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
 
   // Retrieve the literal array and check the allocated entry.
 
@@ -2256,15 +2679,15 @@
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code.
   __ cmp(r2, Operand(Factory::undefined_value()));
-  __ b(eq, deferred->enter());
-  __ bind(deferred->exit());
+  deferred->enter()->Branch(eq);
+  deferred->BindExit();
 
   // Push the object literal boilerplate.
-  frame_->Push(r2);
+  frame_->EmitPush(r2);
 
   // Clone the boilerplate object.
-  __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
-  frame_->Push(r0);  // save the result
+  frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  frame_->EmitPush(r0);  // save the result
   // r0: cloned object literal
 
   for (int i = 0; i < node->properties()->length(); i++) {
@@ -2272,56 +2695,125 @@
     Literal* key = property->key();
     Expression* value = property->value();
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT: break;
+      case ObjectLiteral::Property::CONSTANT:
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (property->value()->AsMaterializedLiteral()->is_simple()) break;
+        // else fall through
       case ObjectLiteral::Property::COMPUTED:  // fall through
       case ObjectLiteral::Property::PROTOTYPE: {
-        frame_->Push(r0);  // dup the result
-        Load(key);
-        Load(value);
-        __ CallRuntime(Runtime::kSetProperty, 3);
+        frame_->EmitPush(r0);  // dup the result
+        LoadAndSpill(key);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kSetProperty, 3);
         // restore r0
         __ ldr(r0, frame_->Top());
         break;
       }
       case ObjectLiteral::Property::SETTER: {
-        frame_->Push(r0);
-        Load(key);
+        frame_->EmitPush(r0);
+        LoadAndSpill(key);
         __ mov(r0, Operand(Smi::FromInt(1)));
-        frame_->Push(r0);
-        Load(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        frame_->EmitPush(r0);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
         __ ldr(r0, frame_->Top());
         break;
       }
       case ObjectLiteral::Property::GETTER: {
-        frame_->Push(r0);
-        Load(key);
+        frame_->EmitPush(r0);
+        LoadAndSpill(key);
         __ mov(r0, Operand(Smi::FromInt(0)));
-        frame_->Push(r0);
-        Load(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        frame_->EmitPush(r0);
+        LoadAndSpill(value);
+        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
         __ ldr(r0, frame_->Top());
         break;
       }
     }
   }
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateArrayLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and they are
+// therefore context dependent.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  DeferredArrayLiteral(CodeGenerator* generator, ArrayLiteral* node)
+      : DeferredCode(generator), node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  virtual void Generate();
+
+ private:
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  // Argument is passed in r1.
+  enter()->Bind();
+  VirtualFrame::SpilledScope spilled_scope(generator());
+
+  // If the entry is undefined we call the runtime system to compute
+  // the literal.
+
+  VirtualFrame* frame = generator()->frame();
+  // Literal array (0).
+  frame->EmitPush(r1);
+  // Literal index (1).
+  __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
+  frame->EmitPush(r0);
+  // Literals (2).
+  __ mov(r0, Operand(node_->literals()));
+  frame->EmitPush(r0);
+  Result boilerplate =
+      frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  __ mov(r2, Operand(boilerplate.reg()));
+  // Result is returned in r2.
+  exit_.Jump();
 }
 
 
 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  // Call runtime to create the array literal.
-  __ mov(r0, Operand(node->literals()));
-  frame_->Push(r0);
-  // Load the function of this frame.
-  __ ldr(r0, frame_->Function());
-  __ ldr(r0, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-  frame_->Push(r0);
-  __ CallRuntime(Runtime::kCreateArrayLiteral, 2);
+  DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
 
-  // Push the resulting array literal on the stack.
-  frame_->Push(r0);
+  // Retrieve the literal array and check the allocated entry.
+
+  // Load the function of this activation.
+  __ ldr(r1, frame_->Function());
+
+  // Load the literals array of the function.
+  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code.
+  __ cmp(r2, Operand(Factory::undefined_value()));
+  deferred->enter()->Branch(eq);
+  deferred->BindExit();
+
+  // Push the array literal boilerplate.
+  frame_->EmitPush(r2);
+
+  // Clone the boilerplate object.
+  frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  frame_->EmitPush(r0);  // save the result
+  // r0: cloned array literal
 
   // Generate code to set the elements in the array that are not
   // literals.
@@ -2332,8 +2824,8 @@
     // set in the boilerplate object.
     if (value->AsLiteral() == NULL) {
       // The property must be set by generated code.
-      Load(value);
-      frame_->Pop(r0);
+      LoadAndSpill(value);
+      frame_->EmitPop(r0);
 
       // Fetch the object literal
       __ ldr(r1, frame_->Top());
@@ -2349,90 +2841,126 @@
       __ RecordWrite(r1, r3, r2);
     }
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope(this);
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[CatchExtensionObject ");
-  Load(node->key());
-  Load(node->value());
-  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(r0);
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  LoadAndSpill(node->key());
+  LoadAndSpill(node->value());
+  Result result =
+      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->EmitPush(result.reg());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  Reference target(this, node->target());
-  if (target.is_illegal()) return;
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      __ mov(r0, Operand(Smi::FromInt(0)));
+      frame_->EmitPush(r0);
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
 
-  if (node->op() == Token::ASSIGN ||
-      node->op() == Token::INIT_VAR ||
-      node->op() == Token::INIT_CONST) {
-    Load(node->value());
-
-  } else {
-    target.GetValue(NOT_INSIDE_TYPEOF);
-    Literal* literal = node->value()->AsLiteral();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(), literal->handle(), false);
-      frame_->Push(r0);
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      LoadAndSpill(node->value());
 
     } else {
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op());
-      frame_->Push(r0);
+      target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+      Literal* literal = node->value()->AsLiteral();
+      if (literal != NULL && literal->handle()->IsSmi()) {
+        SmiOperation(node->binary_op(), literal->handle(), false);
+        frame_->EmitPush(r0);
+
+      } else {
+        LoadAndSpill(node->value());
+        GenericBinaryOperation(node->binary_op());
+        frame_->EmitPush(r0);
+      }
     }
-  }
 
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  if (var != NULL &&
-      (var->mode() == Variable::CONST) &&
-      node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-    // Assignment ignored - leave the value on the stack.
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+    if (var != NULL &&
+        (var->mode() == Variable::CONST) &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
 
-  } else {
-    CodeForSourcePosition(node->position());
-    if (node->op() == Token::INIT_CONST) {
-      // Dynamic constant initializations must use the function context
-      // and initialize the actual constant declared. Dynamic variable
-      // initializations are simply assignments and use SetValue.
-      target.SetValue(CONST_INIT);
     } else {
-      target.SetValue(NOT_CONST_INIT);
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
     }
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitThrow(Throw* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Throw");
 
-  Load(node->exception());
+  LoadAndSpill(node->exception());
   CodeForSourcePosition(node->position());
-  __ CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(r0);
+  frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitProperty(Property* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Property");
 
-  Reference property(this, node);
-  property.GetValue(typeof_state());
+  { Reference property(this, node);
+    property.GetValueAndSpill(typeof_state());
+  }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitCall(Call* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ Call");
 
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   // Standard function call.
 
   // Check if the function is a variable or a property.
@@ -2456,7 +2984,7 @@
 
     // Push the name of the function and the receiver onto the stack.
     __ mov(r0, Operand(var->name()));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
 
     // Pass the global object as the receiver and let the IC stub
     // patch the stack to use the global proxy as 'this' in the
@@ -2464,16 +2992,20 @@
     LoadGlobal();
 
     // Load the arguments.
-    for (int i = 0; i < args->length(); i++) Load(args->at(i));
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
 
     // Setup the receiver register and call the IC initialization code.
-    Handle<Code> stub = ComputeCallInitialize(args->length());
+    Handle<Code> stub = ComputeCallInitialize(arg_count);
     CodeForSourcePosition(node->position());
-    __ Call(stub, RelocInfo::CODE_TARGET_CONTEXT);
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+                           arg_count + 1);
     __ ldr(cp, frame_->Context());
     // Remove the function from the stack.
-    frame_->Pop();
-    frame_->Push(r0);
+    frame_->Drop();
+    frame_->EmitPush(r0);
 
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
@@ -2482,19 +3014,19 @@
     // ----------------------------------
 
     // Load the function
-    frame_->Push(cp);
+    frame_->EmitPush(cp);
     __ mov(r0, Operand(var->name()));
-    frame_->Push(r0);
-    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    frame_->EmitPush(r0);
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
     // r0: slot value; r1: receiver
 
     // Load the receiver.
-    frame_->Push(r0);  // function
-    frame_->Push(r1);  // receiver
+    frame_->EmitPush(r0);  // function
+    frame_->EmitPush(r1);  // receiver
 
     // Call the function.
     CallWithArguments(args, node->position());
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
 
   } else if (property != NULL) {
     // Check if the key is a literal string.
@@ -2507,22 +3039,25 @@
 
       // Push the name of the function and the receiver onto the stack.
       __ mov(r0, Operand(literal->handle()));
-      frame_->Push(r0);
-      Load(property->obj());
+      frame_->EmitPush(r0);
+      LoadAndSpill(property->obj());
 
       // Load the arguments.
-      for (int i = 0; i < args->length(); i++) Load(args->at(i));
+      int arg_count = args->length();
+      for (int i = 0; i < arg_count; i++) {
+        LoadAndSpill(args->at(i));
+      }
 
       // Set the receiver register and call the IC initialization code.
-      Handle<Code> stub = ComputeCallInitialize(args->length());
+      Handle<Code> stub = ComputeCallInitialize(arg_count);
       CodeForSourcePosition(node->position());
-      __ Call(stub, RelocInfo::CODE_TARGET);
+      frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
       __ ldr(cp, frame_->Context());
 
       // Remove the function from the stack.
-      frame_->Pop();
+      frame_->Drop();
 
-      frame_->Push(r0);  // push after get rid of function from the stack
+      frame_->EmitPush(r0);  // push after get rid of function from the stack
 
     } else {
       // -------------------------------------------
@@ -2531,14 +3066,19 @@
 
       // Load the function to call from the property through a reference.
       Reference ref(this, property);
-      ref.GetValue(NOT_INSIDE_TYPEOF);  // receiver
+      ref.GetValueAndSpill(NOT_INSIDE_TYPEOF);  // receiver
 
       // Pass receiver to called function.
-      __ ldr(r0, frame_->Element(ref.size()));
-      frame_->Push(r0);
+      if (property->is_synthetic()) {
+        LoadGlobalReceiver(r0);
+      } else {
+        __ ldr(r0, frame_->ElementAt(ref.size()));
+        frame_->EmitPush(r0);
+      }
+
       // Call the function.
       CallWithArguments(args, node->position());
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
     }
 
   } else {
@@ -2547,19 +3087,24 @@
     // ----------------------------------
 
     // Load the function.
-    Load(function);
+    LoadAndSpill(function);
 
     // Pass the global proxy as the receiver.
     LoadGlobalReceiver(r0);
 
     // Call the function.
     CallWithArguments(args, node->position());
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitCallEval(CallEval* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ CallEval");
 
   // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
@@ -2569,51 +3114,57 @@
   ZoneList<Expression*>* args = node->arguments();
   Expression* function = node->expression();
 
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // Prepare stack for call to resolved function.
-  Load(function);
+  LoadAndSpill(function);
   __ mov(r2, Operand(Factory::undefined_value()));
-  __ push(r2);  // Slot for receiver
-  for (int i = 0; i < args->length(); i++) {
-    Load(args->at(i));
+  frame_->EmitPush(r2);  // Slot for receiver
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
   }
 
   // Prepare stack for call to ResolvePossiblyDirectEval.
-  __ ldr(r1, MemOperand(sp, args->length() * kPointerSize + kPointerSize));
-  __ push(r1);
-  if (args->length() > 0) {
-    __ ldr(r1, MemOperand(sp, args->length() * kPointerSize));
-    __ push(r1);
+  __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+  frame_->EmitPush(r1);
+  if (arg_count > 0) {
+    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+    frame_->EmitPush(r1);
   } else {
-    __ push(r2);
+    frame_->EmitPush(r2);
   }
 
   // Resolve the call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+  frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
 
   // Touch up stack with the right values for the function and the receiver.
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
-  __ str(r1, MemOperand(sp, (args->length() + 1) * kPointerSize));
+  __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
-  __ str(r1, MemOperand(sp, args->length() * kPointerSize));
+  __ str(r1, MemOperand(sp, arg_count * kPointerSize));
 
   // Call the function.
   CodeForSourcePosition(node->position());
 
-  CallFunctionStub call_function(args->length());
-  __ CallStub(&call_function);
+  CallFunctionStub call_function(arg_count);
+  frame_->CallStub(&call_function, arg_count + 1);
 
   __ ldr(cp, frame_->Context());
   // Remove the function from the stack.
-  frame_->Pop();
-  frame_->Push(r0);
+  frame_->Drop();
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitCallNew(CallNew* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -2624,106 +3175,123 @@
   // Compute function to call and use the global object as the
   // receiver. There is no need to use the global proxy here because
   // it will always be replaced with a newly allocated object.
-  Load(node->expression());
+  LoadAndSpill(node->expression());
   LoadGlobal();
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = node->arguments();
-  for (int i = 0; i < args->length(); i++) Load(args->at(i));
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    LoadAndSpill(args->at(i));
+  }
 
   // r0: the number of arguments.
-  __ mov(r0, Operand(args->length()));
+  Result num_args = allocator_->Allocate(r0);
+  ASSERT(num_args.is_valid());
+  __ mov(num_args.reg(), Operand(arg_count));
 
   // Load the function into r1 as per calling convention.
-  __ ldr(r1, frame_->Element(args->length() + 1));
+  Result function = allocator_->Allocate(r1);
+  ASSERT(function.is_valid());
+  __ ldr(function.reg(), frame_->ElementAt(arg_count + 1));
 
   // Call the construct call builtin that handles allocation and
   // constructor invocation.
   CodeForSourcePosition(node->position());
-  __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
-          RelocInfo::CONSTRUCT_CALL);
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  Result result = frame_->CallCodeObject(ic,
+                                         RelocInfo::CONSTRUCT_CALL,
+                                         &num_args,
+                                         &function,
+                                         arg_count + 1);
 
   // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
   __ str(r0, frame_->Top());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 1);
-  Label leave;
-  Load(args->at(0));
-  frame_->Pop(r0);  // r0 contains object.
+  JumpTarget leave(this);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);  // r0 contains object.
   // if (object->IsSmi()) return the object.
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &leave);
+  leave.Branch(eq);
   // It is a heap object - get map.
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   // if (!object->IsJSValue()) return the object.
   __ cmp(r1, Operand(JS_VALUE_TYPE));
-  __ b(ne, &leave);
+  leave.Branch(ne);
   // Load the value.
   __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
-  __ bind(&leave);
-  frame_->Push(r0);
+  leave.Bind();
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 2);
-  Label leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  frame_->Pop(r0);  // r0 contains value
-  frame_->Pop(r1);  // r1 contains object
+  JumpTarget leave(this);
+  LoadAndSpill(args->at(0));  // Load the object.
+  LoadAndSpill(args->at(1));  // Load the value.
+  frame_->EmitPop(r0);  // r0 contains value
+  frame_->EmitPop(r1);  // r1 contains object
   // if (object->IsSmi()) return object.
   __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &leave);
+  leave.Branch(eq);
   // It is a heap object - get map.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
   // if (!object->IsJSValue()) return object.
   __ cmp(r2, Operand(JS_VALUE_TYPE));
-  __ b(ne, &leave);
+  leave.Branch(ne);
   // Store the value.
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.
   __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
   __ RecordWrite(r1, r2, r3);
   // Leave.
-  __ bind(&leave);
-  frame_->Push(r0);
+  leave.Bind();
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 1);
-  Load(args->at(0));
-  frame_->Pop(r0);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
   __ tst(r0, Operand(kSmiTagMask));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
+    LoadAndSpill(args->at(1));
+    LoadAndSpill(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
   __ mov(r0, Operand(Factory::undefined_value()));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 1);
-  Load(args->at(0));
-  frame_->Pop(r0);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   cc_reg_ = eq;
 }
@@ -2733,34 +3301,37 @@
 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
 // It is not yet implemented on ARM, so it always goes to the slow case.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 2);
   __ mov(r0, Operand(Factory::undefined_value()));
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Label answer;
+  LoadAndSpill(args->at(0));
+  JumpTarget answer(this);
   // We need the CC bits to come out as not_equal in the case where the
   // object is a smi.  This can't be done with the usual test opcode so
   // we use XOR to get the right CC bits.
-  frame_->Pop(r0);
+  frame_->EmitPop(r0);
   __ and_(r1, r0, Operand(kSmiTagMask));
   __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
-  __ b(ne, &answer);
+  answer.Branch(ne);
   // It is a heap object - get the map.
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
   // Check if the object is a JS array or not.
   __ cmp(r1, Operand(JS_ARRAY_TYPE));
-  __ bind(&answer);
+  answer.Bind();
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 0);
 
   // Seed the result with the formal parameters count, which will be used
@@ -2769,42 +3340,52 @@
 
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
-  __ CallStub(&stub);
-  frame_->Push(r0);
+  frame_->CallStub(&stub, 0);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 1);
 
   // Satisfy contract with ArgumentsAccessStub:
   // Load the key into r1 and the formal parameters count into r0.
-  Load(args->at(0));
-  frame_->Pop(r1);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r1);
   __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
 
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  frame_->Push(r0);
+  frame_->CallStub(&stub, 0);
+  frame_->EmitPush(r0);
 }
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(this);
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  frame_->Pop(r0);
-  frame_->Pop(r1);
+  LoadAndSpill(args->at(0));
+  LoadAndSpill(args->at(1));
+  frame_->EmitPop(r0);
+  frame_->EmitPop(r1);
   __ cmp(r0, Operand(r1));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  if (CheckForInlineRuntimeCall(node)) return;
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
+  if (CheckForInlineRuntimeCall(node)) {
+    ASSERT((has_cc() && frame_->height() == original_height) ||
+           (!has_cc() && frame_->height() == original_height + 1));
+    return;
+  }
 
   ZoneList<Expression*>* args = node->arguments();
   Comment cmnt(masm_, "[ CallRuntime");
@@ -2812,76 +3393,93 @@
 
   if (function != NULL) {
     // Push the arguments ("left-to-right").
-    for (int i = 0; i < args->length(); i++) Load(args->at(i));
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
 
     // Call the C runtime function.
-    __ CallRuntime(function, args->length());
-    frame_->Push(r0);
+    frame_->CallRuntime(function, arg_count);
+    frame_->EmitPush(r0);
 
   } else {
     // Prepare stack for calling JS runtime function.
     __ mov(r0, Operand(node->name()));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
     // Push the builtins object found in the current global object.
     __ ldr(r1, GlobalObject());
     __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
 
-    for (int i = 0; i < args->length(); i++) Load(args->at(i));
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
 
     // Call the JS runtime function.
     Handle<Code> stub = ComputeCallInitialize(args->length());
-    __ Call(stub, RelocInfo::CODE_TARGET);
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
     __ ldr(cp, frame_->Context());
-    frame_->Pop();
-    frame_->Push(r0);
+    frame_->Drop();
+    frame_->EmitPush(r0);
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
 
   if (op == Token::NOT) {
-    LoadCondition(node->expression(),
-                  NOT_INSIDE_TYPEOF,
-                  false_target(),
-                  true_target(),
-                  true);
+    LoadConditionAndSpill(node->expression(),
+                          NOT_INSIDE_TYPEOF,
+                          false_target(),
+                          true_target(),
+                          true);
     cc_reg_ = NegateCondition(cc_reg_);
 
   } else if (op == Token::DELETE) {
     Property* property = node->expression()->AsProperty();
     Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
     if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      __ mov(r0, Operand(1));  // not counting receiver
-      __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+      LoadAndSpill(property->obj());
+      LoadAndSpill(property->key());
+      Result arg_count = allocator_->Allocate(r0);
+      ASSERT(arg_count.is_valid());
+      __ mov(arg_count.reg(), Operand(1));  // not counting receiver
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
 
     } else if (variable != NULL) {
       Slot* slot = variable->slot();
       if (variable->is_global()) {
         LoadGlobal();
         __ mov(r0, Operand(variable->name()));
-        frame_->Push(r0);
-        __ mov(r0, Operand(1));  // not counting receiver
-        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+        frame_->EmitPush(r0);
+        Result arg_count = allocator_->Allocate(r0);
+        ASSERT(arg_count.is_valid());
+        __ mov(arg_count.reg(), Operand(1));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
 
       } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
         // lookup the context holding the named variable
-        frame_->Push(cp);
+        frame_->EmitPush(cp);
         __ mov(r0, Operand(variable->name()));
-        frame_->Push(r0);
-        __ CallRuntime(Runtime::kLookupContext, 2);
+        frame_->EmitPush(r0);
+        frame_->CallRuntime(Runtime::kLookupContext, 2);
         // r0: context
-        frame_->Push(r0);
+        frame_->EmitPush(r0);
         __ mov(r0, Operand(variable->name()));
-        frame_->Push(r0);
-        __ mov(r0, Operand(1));  // not counting receiver
-        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+        frame_->EmitPush(r0);
+        Result arg_count = allocator_->Allocate(r0);
+        ASSERT(arg_count.is_valid());
+        __ mov(arg_count.reg(), Operand(1));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
 
       } else {
         // Default: Result of deleting non-global, not dynamically
@@ -2891,22 +3489,22 @@
 
     } else {
       // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->Pop();
+      LoadAndSpill(node->expression());  // may have side-effects
+      frame_->Drop();
       __ mov(r0, Operand(Factory::true_value()));
     }
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
 
   } else if (op == Token::TYPEOF) {
     // Special case for loading the typeof expression; see comment on
     // LoadTypeofExpression().
     LoadTypeofExpression(node->expression());
-    __ CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(r0);  // r0 has result
+    frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->EmitPush(r0);  // r0 has result
 
   } else {
-    Load(node->expression());
-    frame_->Pop(r0);
+    LoadAndSpill(node->expression());
+    frame_->EmitPop(r0);
     switch (op) {
       case Token::NOT:
       case Token::DELETE:
@@ -2916,26 +3514,28 @@
 
       case Token::SUB: {
         UnarySubStub stub;
-        __ CallStub(&stub);
+        frame_->CallStub(&stub, 0);
         break;
       }
 
       case Token::BIT_NOT: {
         // smi check
-        Label smi_label;
-        Label continue_label;
+        JumpTarget smi_label(this);
+        JumpTarget continue_label(this);
         __ tst(r0, Operand(kSmiTagMask));
-        __ b(eq, &smi_label);
+        smi_label.Branch(eq);
 
-        frame_->Push(r0);
-        __ mov(r0, Operand(0));  // not counting receiver
-        __ InvokeBuiltin(Builtins::BIT_NOT, CALL_JS);
+        frame_->EmitPush(r0);
+        Result arg_count = allocator_->Allocate(r0);
+        ASSERT(arg_count.is_valid());
+        __ mov(arg_count.reg(), Operand(0));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
 
-        __ b(&continue_label);
-        __ bind(&smi_label);
+        continue_label.Jump();
+        smi_label.Bind();
         __ mvn(r0, Operand(r0));
         __ bic(r0, r0, Operand(kSmiTagMask));  // bit-clear inverted smi-tag
-        __ bind(&continue_label);
+        continue_label.Bind();
         break;
       }
 
@@ -2947,24 +3547,32 @@
 
       case Token::ADD: {
         // Smi check.
-        Label continue_label;
+        JumpTarget continue_label(this);
         __ tst(r0, Operand(kSmiTagMask));
-        __ b(eq, &continue_label);
-        frame_->Push(r0);
-        __ mov(r0, Operand(0));  // not counting receiver
-        __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
-        __ bind(&continue_label);
+        continue_label.Branch(eq);
+        frame_->EmitPush(r0);
+        Result arg_count = allocator_->Allocate(r0);
+        ASSERT(arg_count.is_valid());
+        __ mov(arg_count.reg(), Operand(0));  // not counting receiver
+        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+        continue_label.Bind();
         break;
       }
       default:
         UNREACHABLE();
     }
-    frame_->Push(r0);  // r0 has result
+    frame_->EmitPush(r0);  // r0 has result
   }
+  ASSERT((has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
 }
 
 
 void CodeGenerator::VisitCountOperation(CountOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ CountOperation");
 
   bool is_postfix = node->is_postfix();
@@ -2976,26 +3584,36 @@
   // Postfix: Make room for the result.
   if (is_postfix) {
      __ mov(r0, Operand(0));
-     frame_->Push(r0);
+     frame_->EmitPush(r0);
   }
 
   { Reference target(this, node->expression());
-    if (target.is_illegal()) return;
-    target.GetValue(NOT_INSIDE_TYPEOF);
-    frame_->Pop(r0);
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) {
+        __ mov(r0, Operand(Smi::FromInt(0)));
+        frame_->EmitPush(r0);
+      }
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
+    target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+    frame_->EmitPop(r0);
 
-    Label slow, exit;
+    JumpTarget slow(this);
+    JumpTarget exit(this);
 
     // Load the value (1) into register r1.
     __ mov(r1, Operand(Smi::FromInt(1)));
 
     // Check for smi operand.
     __ tst(r0, Operand(kSmiTagMask));
-    __ b(ne, &slow);
+    slow.Branch(ne);
 
     // Postfix: Store the old value as the result.
     if (is_postfix) {
-      __ str(r0, frame_->Element(target.size()));
+      __ str(r0, frame_->ElementAt(target.size()));
     }
 
     // Perform optimistic increment/decrement.
@@ -3006,7 +3624,7 @@
     }
 
     // If the increment/decrement didn't overflow, we're done.
-    __ b(vc, &exit);
+    exit.Branch(vc);
 
     // Revert optimistic increment/decrement.
     if (is_increment) {
@@ -3016,37 +3634,42 @@
     }
 
     // Slow case: Convert to number.
-    __ bind(&slow);
+    slow.Bind();
 
     // Postfix: Convert the operand to a number and store it as the result.
     if (is_postfix) {
       InvokeBuiltinStub stub(InvokeBuiltinStub::ToNumber, 2);
-      __ CallStub(&stub);
+      frame_->CallStub(&stub, 0);
       // Store to result (on the stack).
-      __ str(r0, frame_->Element(target.size()));
+      __ str(r0, frame_->ElementAt(target.size()));
     }
 
     // Compute the new value by calling the right JavaScript native.
     if (is_increment) {
       InvokeBuiltinStub stub(InvokeBuiltinStub::Inc, 1);
-      __ CallStub(&stub);
+      frame_->CallStub(&stub, 0);
     } else {
       InvokeBuiltinStub stub(InvokeBuiltinStub::Dec, 1);
-      __ CallStub(&stub);
+      frame_->CallStub(&stub, 0);
     }
 
     // Store the new value in the target if not const.
-    __ bind(&exit);
-    frame_->Push(r0);
+    exit.Bind();
+    frame_->EmitPush(r0);
     if (!is_const) target.SetValue(NOT_CONST_INIT);
   }
 
   // Postfix: Discard the new value and use the old.
-  if (is_postfix) frame_->Pop(r0);
+  if (is_postfix) frame_->EmitPop(r0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ BinaryOperation");
   Token::Value op = node->op();
 
@@ -3063,28 +3686,29 @@
   // of compiling the binary operation is materialized or not.
 
   if (op == Token::AND) {
-    Label is_true;
-    LoadCondition(node->left(),
-                  NOT_INSIDE_TYPEOF,
-                  &is_true,
-                  false_target(),
-                  false);
+    JumpTarget is_true(this);
+    LoadConditionAndSpill(node->left(),
+                          NOT_INSIDE_TYPEOF,
+                          &is_true,
+                          false_target(),
+                          false);
     if (has_cc()) {
       Branch(false, false_target());
 
       // Evaluate right side expression.
-      __ bind(&is_true);
-      LoadCondition(node->right(),
-                    NOT_INSIDE_TYPEOF,
-                    true_target(),
-                    false_target(),
-                    false);
+      is_true.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
 
     } else {
-      Label pop_and_continue, exit;
+      JumpTarget pop_and_continue(this);
+      JumpTarget exit(this);
 
       __ ldr(r0, frame_->Top());  // dup the stack top
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -3092,40 +3716,41 @@
       Branch(false, &exit);
 
       // Pop the result of evaluating the first part.
-      __ bind(&pop_and_continue);
-      frame_->Pop(r0);
+      pop_and_continue.Bind();
+      frame_->EmitPop(r0);
 
       // Evaluate right side expression.
-      __ bind(&is_true);
-      Load(node->right());
+      is_true.Bind();
+      LoadAndSpill(node->right());
 
       // Exit (always with a materialized value).
-      __ bind(&exit);
+      exit.Bind();
     }
 
   } else if (op == Token::OR) {
-    Label is_false;
-    LoadCondition(node->left(),
-                  NOT_INSIDE_TYPEOF,
-                  true_target(),
-                  &is_false,
-                  false);
+    JumpTarget is_false(this);
+    LoadConditionAndSpill(node->left(),
+                          NOT_INSIDE_TYPEOF,
+                          true_target(),
+                          &is_false,
+                          false);
     if (has_cc()) {
       Branch(true, true_target());
 
       // Evaluate right side expression.
-      __ bind(&is_false);
-      LoadCondition(node->right(),
-                    NOT_INSIDE_TYPEOF,
-                    true_target(),
-                    false_target(),
-                    false);
+      is_false.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
 
     } else {
-      Label pop_and_continue, exit;
+      JumpTarget pop_and_continue(this);
+      JumpTarget exit(this);
 
       __ ldr(r0, frame_->Top());
-      frame_->Push(r0);
+      frame_->EmitPush(r0);
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -3133,15 +3758,15 @@
       Branch(true, &exit);
 
       // Pop the result of evaluating the first part.
-      __ bind(&pop_and_continue);
-      frame_->Pop(r0);
+      pop_and_continue.Bind();
+      frame_->EmitPop(r0);
 
       // Evaluate right side expression.
-      __ bind(&is_false);
-      Load(node->right());
+      is_false.Bind();
+      LoadAndSpill(node->right());
 
       // Exit (always with a materialized value).
-      __ bind(&exit);
+      exit.Bind();
     }
 
   } else {
@@ -3151,30 +3776,41 @@
     Literal* rliteral = node->right()->AsLiteral();
 
     if (rliteral != NULL && rliteral->handle()->IsSmi()) {
-      Load(node->left());
+      LoadAndSpill(node->left());
       SmiOperation(node->op(), rliteral->handle(), false);
 
     } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
-      Load(node->right());
+      LoadAndSpill(node->right());
       SmiOperation(node->op(), lliteral->handle(), true);
 
     } else {
-      Load(node->left());
-      Load(node->right());
+      LoadAndSpill(node->left());
+      LoadAndSpill(node->right());
       GenericBinaryOperation(node->op());
     }
-    frame_->Push(r0);
+    frame_->EmitPush(r0);
   }
+  ASSERT((has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
 }
 
 
 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   __ ldr(r0, frame_->Function());
-  frame_->Push(r0);
+  frame_->EmitPush(r0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Get the expressions from the node.
@@ -3193,20 +3829,20 @@
         right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
     // The 'null' value can only be equal to 'null' or 'undefined'.
     if (left_is_null || right_is_null) {
-      Load(left_is_null ? right : left);
-      frame_->Pop(r0);
+      LoadAndSpill(left_is_null ? right : left);
+      frame_->EmitPop(r0);
       __ cmp(r0, Operand(Factory::null_value()));
 
       // The 'null' value is only equal to 'undefined' if using non-strict
       // comparisons.
       if (op != Token::EQ_STRICT) {
-        __ b(eq, true_target());
+        true_target()->Branch(eq);
 
         __ cmp(r0, Operand(Factory::undefined_value()));
-        __ b(eq, true_target());
+        true_target()->Branch(eq);
 
         __ tst(r0, Operand(kSmiTagMask));
-        __ b(eq, false_target());
+        false_target()->Branch(eq);
 
         // It can be an undetectable object.
         __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -3216,6 +3852,7 @@
       }
 
       cc_reg_ = eq;
+      ASSERT(has_cc() && frame_->height() == original_height);
       return;
     }
   }
@@ -3232,18 +3869,18 @@
 
     // Load the operand, move it to register r1.
     LoadTypeofExpression(operation->expression());
-    frame_->Pop(r1);
+    frame_->EmitPop(r1);
 
     if (check->Equals(Heap::number_symbol())) {
       __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, true_target());
+      true_target()->Branch(eq);
       __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
       __ cmp(r1, Operand(Factory::heap_number_map()));
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::string_symbol())) {
       __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
 
       __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
 
@@ -3251,7 +3888,7 @@
       __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
       __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
       __ cmp(r2, Operand(1 << Map::kIsUndetectable));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
 
       __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
       __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
@@ -3259,16 +3896,16 @@
 
     } else if (check->Equals(Heap::boolean_symbol())) {
       __ cmp(r1, Operand(Factory::true_value()));
-      __ b(eq, true_target());
+      true_target()->Branch(eq);
       __ cmp(r1, Operand(Factory::false_value()));
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::undefined_symbol())) {
       __ cmp(r1, Operand(Factory::undefined_value()));
-      __ b(eq, true_target());
+      true_target()->Branch(eq);
 
       __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
 
       // It can be an undetectable object.
       __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -3280,7 +3917,7 @@
 
     } else if (check->Equals(Heap::function_symbol())) {
       __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
       __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
       __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
       __ cmp(r1, Operand(JS_FUNCTION_TYPE));
@@ -3288,34 +3925,36 @@
 
     } else if (check->Equals(Heap::object_symbol())) {
       __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
 
       __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
       __ cmp(r1, Operand(Factory::null_value()));
-      __ b(eq, true_target());
+      true_target()->Branch(eq);
 
       // It can be an undetectable object.
       __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
       __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
       __ cmp(r1, Operand(1 << Map::kIsUndetectable));
-      __ b(eq, false_target());
+      false_target()->Branch(eq);
 
       __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
       __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
-      __ b(lt, false_target());
+      false_target()->Branch(lt);
       __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
       cc_reg_ = le;
 
     } else {
       // Uncommon case: typeof testing against a string literal that is
       // never returned from the typeof operator.
-      __ b(false_target());
+      false_target()->Jump();
     }
+    ASSERT(!has_valid_frame() ||
+           (has_cc() && frame_->height() == original_height));
     return;
   }
 
-  Load(left);
-  Load(right);
+  LoadAndSpill(left);
+  LoadAndSpill(right);
   switch (op) {
     case Token::EQ:
       Comparison(eq, false);
@@ -3341,25 +3980,44 @@
       Comparison(eq, true);
       break;
 
-    case Token::IN:
-      __ mov(r0, Operand(1));  // not counting receiver
-      __ InvokeBuiltin(Builtins::IN, CALL_JS);
-      frame_->Push(r0);
+    case Token::IN: {
+      Result arg_count = allocator_->Allocate(r0);
+      ASSERT(arg_count.is_valid());
+      __ mov(arg_count.reg(), Operand(1));  // not counting receiver
+      Result result = frame_->InvokeBuiltin(Builtins::IN,
+                                            CALL_JS,
+                                            &arg_count,
+                                            2);
+      frame_->EmitPush(result.reg());
       break;
+    }
 
-    case Token::INSTANCEOF:
-      __ mov(r0, Operand(1));  // not counting receiver
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
-      __ tst(r0, Operand(r0));
+    case Token::INSTANCEOF: {
+      Result arg_count = allocator_->Allocate(r0);
+      ASSERT(arg_count.is_valid());
+      __ mov(arg_count.reg(), Operand(1));  // not counting receiver
+      Result result = frame_->InvokeBuiltin(Builtins::INSTANCE_OF,
+                                            CALL_JS,
+                                            &arg_count,
+                                            2);
+      __ tst(result.reg(), Operand(result.reg()));
       cc_reg_ = eq;
       break;
+    }
 
     default:
       UNREACHABLE();
   }
+  ASSERT((has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
 }
 
 
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
 #undef __
 #define __ masm->
 
@@ -3381,10 +4039,11 @@
 
 
 void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
   ASSERT(!cgen_->has_cc());
   MacroAssembler* masm = cgen_->masm();
-  VirtualFrame* frame = cgen_->frame();
   Property* property = expression_->AsProperty();
   if (property != NULL) {
     cgen_->CodeForSourcePosition(property->position());
@@ -3405,20 +4064,21 @@
       // there is a chance that reference errors can be thrown below, we
       // must distinguish between the two kinds of loads (typeof expression
       // loads must not throw a reference error).
+      VirtualFrame* frame = cgen_->frame();
       Comment cmnt(masm, "[ Load from named Property");
-      // Setup the name register.
       Handle<String> name(GetName());
-      __ mov(r2, Operand(name));
-      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-
       Variable* var = expression_->AsVariableProxy()->AsVariable();
-      if (var != NULL) {
-        ASSERT(var->is_global());
-        __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-      } else {
-        __ Call(ic, RelocInfo::CODE_TARGET);
-      }
-      frame->Push(r0);
+      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+      // Setup the name register.
+      Result name_reg = cgen_->allocator()->Allocate(r2);
+      ASSERT(name_reg.is_valid());
+      __ mov(name_reg.reg(), Operand(name));
+      ASSERT(var == NULL || var->is_global());
+      RelocInfo::Mode rmode = (var == NULL)
+                            ? RelocInfo::CODE_TARGET
+                            : RelocInfo::CODE_TARGET_CONTEXT;
+      Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
+      frame->EmitPush(answer.reg());
       break;
     }
 
@@ -3428,18 +4088,17 @@
 
       // TODO(181): Implement inlined version of array indexing once
       // loop nesting is properly tracked on ARM.
+      VirtualFrame* frame = cgen_->frame();
       Comment cmnt(masm, "[ Load from keyed Property");
       ASSERT(property != NULL);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-
       Variable* var = expression_->AsVariableProxy()->AsVariable();
-      if (var != NULL) {
-        ASSERT(var->is_global());
-        __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-      } else {
-        __ Call(ic, RelocInfo::CODE_TARGET);
-      }
-      frame->Push(r0);
+      ASSERT(var == NULL || var->is_global());
+      RelocInfo::Mode rmode = (var == NULL)
+                            ? RelocInfo::CODE_TARGET
+                            : RelocInfo::CODE_TARGET_CONTEXT;
+      Result answer = frame->CallCodeObject(ic, rmode, 0);
+      frame->EmitPush(answer.reg());
       break;
     }
 
@@ -3468,9 +4127,9 @@
         ASSERT(slot->var()->is_dynamic());
 
         // For now, just do a runtime call.
-        frame->Push(cp);
+        frame->EmitPush(cp);
         __ mov(r0, Operand(slot->var()->name()));
-        frame->Push(r0);
+        frame->EmitPush(r0);
 
         if (init_state == CONST_INIT) {
           // Same as the case for a normal store, but ignores attribute
@@ -3488,18 +4147,18 @@
           // and when the expression operands are defined and valid, and
           // thus we need the split into 2 operations: declaration of the
           // context slot followed by initialization.
-          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+          frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
         } else {
-          __ CallRuntime(Runtime::kStoreContextSlot, 3);
+          frame->CallRuntime(Runtime::kStoreContextSlot, 3);
         }
         // Storing a variable must keep the (new) value on the expression
         // stack. This is necessary for compiling assignment expressions.
-        frame->Push(r0);
+        frame->EmitPush(r0);
 
       } else {
         ASSERT(!slot->var()->is_dynamic());
 
-        Label exit;
+        JumpTarget exit(cgen_);
         if (init_state == CONST_INIT) {
           ASSERT(slot->var()->mode() == Variable::CONST);
           // Only the first const initialization must be executed (the slot
@@ -3508,7 +4167,7 @@
           Comment cmnt(masm, "[ Init const");
           __ ldr(r2, cgen_->SlotOperand(slot, r2));
           __ cmp(r2, Operand(Factory::the_hole_value()));
-          __ b(ne, &exit);
+          exit.Branch(ne);
         }
 
         // We must execute the store.  Storing a variable must keep the
@@ -3520,13 +4179,13 @@
         // initialize consts to 'the hole' value and by doing so, end up
         // calling this code.  r2 may be loaded with context; used below in
         // RecordWrite.
-        frame->Pop(r0);
+        frame->EmitPop(r0);
         __ str(r0, cgen_->SlotOperand(slot, r2));
-        frame->Push(r0);
+        frame->EmitPush(r0);
         if (slot->type() == Slot::CONTEXT) {
           // Skip write barrier if the written value is a smi.
           __ tst(r0, Operand(kSmiTagMask));
-          __ b(eq, &exit);
+          exit.Branch(eq);
           // r2 is loaded with context when calling SlotOperand above.
           int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
           __ mov(r3, Operand(offset));
@@ -3536,7 +4195,7 @@
         // to bind the exit label.  Doing so can defeat peephole
         // optimization.
         if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
-          __ bind(&exit);
+          exit.Bind();
         }
       }
       break;
@@ -3545,13 +4204,23 @@
     case NAMED: {
       Comment cmnt(masm, "[ Store to named Property");
       // Call the appropriate IC code.
-      frame->Pop(r0);  // value
-      // Setup the name register.
-      Handle<String> name(GetName());
-      __ mov(r2, Operand(name));
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      __ Call(ic, RelocInfo::CODE_TARGET);
-      frame->Push(r0);
+      Handle<String> name(GetName());
+
+      Result value = cgen_->allocator()->Allocate(r0);
+      ASSERT(value.is_valid());
+      frame->EmitPop(value.reg());
+
+      // Setup the name register.
+      Result property_name = cgen_->allocator()->Allocate(r2);
+      ASSERT(property_name.is_valid());
+      __ mov(property_name.reg(), Operand(name));
+      Result answer = frame->CallCodeObject(ic,
+                                            RelocInfo::CODE_TARGET,
+                                            &value,
+                                            &property_name,
+                                            0);
+      frame->EmitPush(answer.reg());
       break;
     }
 
@@ -3564,9 +4233,12 @@
       // Call IC code.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       // TODO(1222589): Make the IC grab the values from the stack.
-      frame->Pop(r0);  // value
-      __ Call(ic, RelocInfo::CODE_TARGET);
-      frame->Push(r0);
+      Result value = cgen_->allocator()->Allocate(r0);
+      ASSERT(value.is_valid());
+      frame->EmitPop(value.reg());  // value
+      Result result =
+          frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+      frame->EmitPush(result.reg());
       break;
     }
 
diff --git a/src/codegen-arm.h b/src/codegen-arm.h
index 4ab5ce6..29fe2d4 100644
--- a/src/codegen-arm.h
+++ b/src/codegen-arm.h
@@ -43,57 +43,6 @@
 
 
 // -------------------------------------------------------------------------
-// Virtual frame
-
-class VirtualFrame BASE_EMBEDDED {
- public:
-  explicit VirtualFrame(CodeGenerator* cgen);
-
-  void Enter();
-  void Exit();
-
-  void AllocateLocals();
-
-  MemOperand Top() const { return MemOperand(sp, 0); }
-
-  MemOperand Element(int index) const {
-    return MemOperand(sp, index * kPointerSize);
-  }
-
-  MemOperand Local(int index) const {
-    ASSERT(0 <= index && index < frame_local_count_);
-    return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-  }
-
-  MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
-
-  MemOperand Context() const { return MemOperand(fp, kContextOffset); }
-
-  MemOperand Parameter(int index) const {
-    // Index -1 corresponds to the receiver.
-    ASSERT(-1 <= index && index <= parameter_count_);
-    return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
-  }
-
-  inline void Drop(int count);
-
-  inline void Pop();
-  inline void Pop(Register reg);
-
-  inline void Push(Register reg);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  MacroAssembler* masm_;
-  int frame_local_count_;
-  int parameter_count_;
-};
-
-
-// -------------------------------------------------------------------------
 // Reference support
 
 // A reference is a C++ stack-allocated object that keeps an ECMA
@@ -132,6 +81,11 @@
   // the expression stack, and it is left in place with its value above it.
   void GetValue(TypeofState typeof_state);
 
+  // Generate code to push the value of a reference on top of the expression
+  // stack and then spill the stack frame.  This function is used temporarily
+  // while the code generator is being transformed.
+  inline void GetValueAndSpill(TypeofState typeof_state);
+
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
   // on the expression stack.  The stored value is left in place (with the
@@ -164,22 +118,22 @@
   // labels.
   CodeGenState(CodeGenerator* owner,
                TypeofState typeof_state,
-               Label* true_target,
-               Label* false_target);
+               JumpTarget* true_target,
+               JumpTarget* false_target);
 
   // Destroy a code generator state and restore the owning code generator's
   // previous state.
   ~CodeGenState();
 
   TypeofState typeof_state() const { return typeof_state_; }
-  Label* true_target() const { return true_target_; }
-  Label* false_target() const { return false_target_; }
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
 
  private:
   CodeGenerator* owner_;
   TypeofState typeof_state_;
-  Label* true_target_;
-  Label* false_target_;
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
   CodeGenState* previous_;
 };
 
@@ -213,11 +167,26 @@
 
   VirtualFrame* frame() const { return frame_; }
 
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
   CodeGenState* state() { return state_; }
   void set_state(CodeGenState* state) { state_ = state; }
 
   void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
 
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
  private:
   // Construction/Destruction
   CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -226,6 +195,8 @@
   // Accessors
   Scope* scope() const { return scope_; }
 
+  // Clearing and generating deferred code.
+  void ClearDeferred();
   void ProcessDeferred();
 
   bool is_eval() { return is_eval_; }
@@ -233,16 +204,44 @@
   // State
   bool has_cc() const  { return cc_reg_ != al; }
   TypeofState typeof_state() const { return state_->typeof_state(); }
-  Label* true_target() const  { return state_->true_target(); }
-  Label* false_target() const  { return state_->false_target(); }
+  JumpTarget* true_target() const  { return state_->true_target(); }
+  JumpTarget* false_target() const  { return state_->false_target(); }
 
 
   // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
 #define DEF_VISIT(type) \
   void Visit##type(type* node);
   NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    Visit(statement);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    VisitStatements(statements);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
+
   // Main code generation function
   void GenCode(FunctionLiteral* fun);
 
@@ -259,7 +258,7 @@
   MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
                                                Register tmp,
                                                Register tmp2,
-                                               Label* slow);
+                                               JumpTarget* slow);
 
   // Expressions
   MemOperand GlobalObject() const  {
@@ -268,20 +267,50 @@
 
   void LoadCondition(Expression* x,
                      TypeofState typeof_state,
-                     Label* true_target,
-                     Label* false_target,
+                     JumpTarget* true_target,
+                     JumpTarget* false_target,
                      bool force_cc);
   void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
   void LoadGlobal();
   void LoadGlobalReceiver(Register scratch);
 
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    Load(expression, typeof_state);
+    frame_->SpillAll();
+    set_in_spilled_code(true);
+  }
+
+  // Call LoadCondition and then spill the virtual frame unless control flow
+  // cannot reach the end of the expression (ie, by emitting only
+  // unconditional jumps to the control targets).
+  void LoadConditionAndSpill(Expression* expression,
+                             TypeofState typeof_state,
+                             JumpTarget* true_target,
+                             JumpTarget* false_target,
+                             bool force_control) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    LoadCondition(expression, typeof_state, true_target, false_target,
+                  force_control);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
+
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
                                          Register tmp,
                                          Register tmp2,
-                                         Label* slow);
+                                         JumpTarget* slow);
 
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
@@ -291,7 +320,7 @@
   // through the context chain.
   void LoadTypeofExpression(Expression* x);
 
-  void ToBoolean(Label* true_target, Label* false_target);
+  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
 
   void GenericBinaryOperation(Token::Value op);
   void Comparison(Condition cc, bool strict = false);
@@ -301,9 +330,8 @@
   void CallWithArguments(ZoneList<Expression*>* arguments, int position);
 
   // Control flow
-  void Branch(bool if_true, Label* L);
+  void Branch(bool if_true, JumpTarget* target);
   void CheckStack();
-  void CleanStack(int num_bytes);
 
   bool CheckForInlineRuntimeCall(CallRuntime* node);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
@@ -371,14 +399,15 @@
   void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
                                        int min_index,
                                        int range,
-                                       Label* fail_label,
+                                       Label* default_label,
                                        Vector<Label*> case_targets,
                                        Vector<Label> case_labels);
 
   // Generate the code for cases for the fast case switch.
   // Called by GenerateFastCaseSwitchJumpTable.
   void GenerateFastCaseSwitchCases(SwitchStatement* node,
-                                   Vector<Label> case_labels);
+                                   Vector<Label> case_labels,
+                                   VirtualFrame* start_frame);
 
   // Fast support for constant-Smi switches.
   void GenerateFastCaseSwitchStatement(SwitchStatement* node,
@@ -395,10 +424,17 @@
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
-  void CodeForStatement(Node* node);
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Node* node);
   void CodeForSourcePosition(int pos);
 
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.
+  bool HasValidEntryRegisters();
+#endif
+
   bool is_eval_;  // Tells whether code is generated for eval.
+
   Handle<Script> script_;
   List<DeferredCode*> deferred_;
 
@@ -408,20 +444,41 @@
   // Code generation state
   Scope* scope_;
   VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
   Condition cc_reg_;
   CodeGenState* state_;
-  bool is_inside_try_;
-  int break_stack_height_;
 
-  // Labels
-  Label function_return_;
+  // Jump targets
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
 
   friend class VirtualFrame;
+  friend class JumpTarget;
   friend class Reference;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+  ASSERT(cgen_->in_spilled_code());
+  cgen_->set_in_spilled_code(false);
+  GetValue(typeof_state);
+  cgen_->frame()->SpillAll();
+  cgen_->set_in_spilled_code(true);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_ARM_H_
diff --git a/src/codegen-ia32.cc b/src/codegen-ia32.cc
index 5bdeb00..390f25c 100644
--- a/src/codegen-ia32.cc
+++ b/src/codegen-ia32.cc
@@ -32,109 +32,19 @@
 #include "debug.h"
 #include "scopes.h"
 #include "runtime.h"
+#include "parser.h"
 
 namespace v8 { namespace internal {
 
 #define __ masm_->
 
 // -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-VirtualFrame::VirtualFrame(CodeGenerator* cgen) {
-  ASSERT(cgen->scope() != NULL);
-
-  masm_ = cgen->masm();
-  frame_local_count_ = cgen->scope()->num_stack_slots();
-  parameter_count_ = cgen->scope()->num_parameters();
-}
-
-
-void VirtualFrame::Enter() {
-  Comment cmnt(masm_, "[ Enter JS frame");
-  __ push(ebp);
-  __ mov(ebp, Operand(esp));
-
-  // Store the context and the function in the frame.
-  __ push(esi);
-  __ push(edi);
-
-  // Clear the function slot when generating debug code.
-  if (FLAG_debug_code) {
-    __ Set(edi, Immediate(reinterpret_cast<int>(kZapValue)));
-  }
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm_, "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-  // short. We need the return sequence to be a least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See VisitReturnStatement for the full return sequence.
-  __ mov(esp, Operand(ebp));
-  __ pop(ebp);
-}
-
-
-void VirtualFrame::AllocateLocals() {
-  if (frame_local_count_ > 0) {
-    Comment cmnt(masm_, "[ Allocate space for locals");
-    __ Set(eax, Immediate(Factory::undefined_value()));
-    for (int i = 0; i < frame_local_count_; i++) {
-      __ push(eax);
-    }
-  }
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  if (count > 0) {
-    __ add(Operand(esp), Immediate(count * kPointerSize));
-  }
-}
-
-
-void VirtualFrame::Pop() { Drop(1); }
-
-
-void VirtualFrame::Pop(Register reg) {
-  __ pop(reg);
-}
-
-
-void VirtualFrame::Pop(Operand operand) {
-  __ pop(operand);
-}
-
-
-void VirtualFrame::Push(Register reg) {
-  __ push(reg);
-}
-
-
-void VirtualFrame::Push(Operand operand) {
-  __ push(operand);
-}
-
-
-void VirtualFrame::Push(Immediate immediate) {
-  __ push(immediate);
-}
-
-
-// -------------------------------------------------------------------------
 // CodeGenState implementation.
 
 CodeGenState::CodeGenState(CodeGenerator* owner)
     : owner_(owner),
       typeof_state_(NOT_INSIDE_TYPEOF),
-      true_target_(NULL),
-      false_target_(NULL),
+      destination_(NULL),
       previous_(NULL) {
   owner_->set_state(this);
 }
@@ -142,12 +52,10 @@
 
 CodeGenState::CodeGenState(CodeGenerator* owner,
                            TypeofState typeof_state,
-                           Label* true_target,
-                           Label* false_target)
+                           ControlDestination* destination)
     : owner_(owner),
       typeof_state_(typeof_state),
-      true_target_(true_target),
-      false_target_(false_target),
+      destination_(destination),
       previous_(owner->state()) {
   owner_->set_state(this);
 }
@@ -170,33 +78,35 @@
       masm_(new MacroAssembler(NULL, buffer_size)),
       scope_(NULL),
       frame_(NULL),
-      cc_reg_(no_condition),
+      allocator_(NULL),
       state_(NULL),
-      is_inside_try_(false),
-      break_stack_height_(0),
-      loop_nesting_(0) {
+      loop_nesting_(0),
+      function_return_is_shadowed_(false),
+      in_spilled_code_(false) {
 }
 
 
 // Calling conventions:
-// ebp: frame pointer
+// ebp: caller's frame pointer
 // esp: stack pointer
-// edi: caller's parameter pointer
+// edi: called JS function
 // esi: callee's context
 
 void CodeGenerator::GenCode(FunctionLiteral* fun) {
   // Record the position for debugging purposes.
-  CodeForSourcePosition(fun->start_position());
+  CodeForFunctionPosition(fun);
 
   ZoneList<Statement*>* body = fun->body();
 
   // Initialize state.
   ASSERT(scope_ == NULL);
   scope_ = fun->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
   ASSERT(frame_ == NULL);
-  VirtualFrame virtual_frame(this);
-  frame_ = &virtual_frame;
-  cc_reg_ = no_condition;
+  frame_ = new VirtualFrame(this);
+  set_in_spilled_code(false);
 
   // Adjust for function-level loop nesting.
   loop_nesting_ += fun->loop_nesting();
@@ -204,73 +114,57 @@
   {
     CodeGenState state(this);
 
-    // Entry
-    // stack: function, receiver, arguments, return address
+    // Entry:
+    // Stack: receiver, arguments, return address.
+    // ebp: caller's frame pointer
     // esp: stack pointer
-    // ebp: frame pointer
-    // edi: caller's parameter pointer
+    // edi: called JS function
     // esi: callee's context
-
+    allocator_->Initialize();
     frame_->Enter();
-    // tos: code slot
+
 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
         fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      frame_->SpillAll();
       __ int3();
     }
 #endif
 
-    // This section now only allocates and copies the formals into the
-    // arguments object. It saves the address in ecx, which is saved
-    // at any point before either garbage collection or ecx is
-    // overwritten.  The flag arguments_array_allocated communicates
-    // with the store into the arguments variable and guards the lazy
-    // pushes of ecx to TOS.  The flag arguments_array_saved notes
-    // when the push has happened.
-    bool arguments_object_allocated = false;
-    bool arguments_object_saved = false;
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots(scope_->num_stack_slots());
+    // Initialize the function return target after the locals are set
+    // up, because it needs the expected frame height from the frame.
+    function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
 
-    // Allocate arguments object.
-    // The arguments object pointer needs to be saved in ecx, since we need
-    // to store arguments into the context.
+    // Allocate the arguments object and copy the parameters into it.
     if (scope_->arguments() != NULL) {
       ASSERT(scope_->arguments_shadow() != NULL);
-      Comment cmnt(masm_, "[ allocate arguments object");
+      Comment cmnt(masm_, "[ Allocate arguments object");
       ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
-      __ lea(eax, frame_->Receiver());
-      frame_->Push(frame_->Function());
-      frame_->Push(eax);
-      frame_->Push(Immediate(Smi::FromInt(scope_->num_parameters())));
-      __ CallStub(&stub);
-      __ mov(ecx, Operand(eax));
-      arguments_object_allocated = true;
+      frame_->PushFunction();
+      frame_->PushReceiverSlotAddress();
+      frame_->Push(Smi::FromInt(scope_->num_parameters()));
+      Result answer = frame_->CallStub(&stub, 3);
+      frame_->Push(&answer);
     }
 
-    // Allocate space for locals and initialize them.
-    frame_->AllocateLocals();
-
     if (scope_->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ allocate local context");
-      // Save the arguments object pointer, if any.
-      if (arguments_object_allocated && !arguments_object_saved) {
-        frame_->Push(ecx);
-        arguments_object_saved = true;
-      }
       // Allocate local context.
       // Get outer context and create a new context based on it.
-      frame_->Push(frame_->Function());
-      __ CallRuntime(Runtime::kNewContext, 1);  // eax holds the result
+      frame_->PushFunction();
+      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
 
-      if (kDebug) {
-        Label verified_true;
-        // Verify eax and esi are the same in debug mode
-        __ cmp(eax, Operand(esi));
-        __ j(equal, &verified_true);
-        __ int3();
-        __ bind(&verified_true);
-      }
       // Update context local.
-      __ mov(frame_->Context(), esi);
+      frame_->SaveContextRegister();
+
+      // Verify that the runtime call result and esi agree.
+      if (FLAG_debug_code) {
+        __ cmp(context.reg(), Operand(esi));
+        __ Assert(equal, "Runtime::NewContext should end up in esi");
+      }
     }
 
     // TODO(1241774): Improve this code:
@@ -292,17 +186,26 @@
         Variable* par = scope_->parameter(i);
         Slot* slot = par->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // Save the arguments object pointer, if any.
-          if (arguments_object_allocated && !arguments_object_saved) {
-            frame_->Push(ecx);
-            arguments_object_saved = true;
-          }
-          ASSERT(!scope_->is_global_scope());  // no parameters in global scope
-          __ mov(eax, frame_->Parameter(i));
-          // Loads ecx with context; used below in RecordWrite.
-          __ mov(SlotOperand(slot, ecx), eax);
+          // The use of SlotOperand below is safe in unspilled code
+          // because the slot is guaranteed to be a context slot.
+          //
+          // There are no parameters in the global scope.
+          ASSERT(!scope_->is_global_scope());
+          frame_->PushParameterAt(i);
+          Result value = frame_->Pop();
+          value.ToRegister();
+
+          // SlotOperand loads context.reg() with the context object
+          // stored to, used below in RecordWrite.
+          Result context = allocator_->Allocate();
+          ASSERT(context.is_valid());
+          __ mov(SlotOperand(slot, context.reg()), value.reg());
           int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          __ RecordWrite(ecx, offset, eax, ebx);
+          Result scratch = allocator_->Allocate();
+          ASSERT(scratch.is_valid());
+          frame_->Spill(context.reg());
+          frame_->Spill(value.reg());
+          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
         }
       }
     }
@@ -314,31 +217,20 @@
     // Store the arguments object.  This must happen after context
     // initialization because the arguments object may be stored in the
     // context.
-    if (arguments_object_allocated) {
-      ASSERT(scope_->arguments() != NULL);
-      ASSERT(scope_->arguments_shadow() != NULL);
+    if (scope_->arguments() != NULL) {
       Comment cmnt(masm_, "[ store arguments object");
       { Reference shadow_ref(this, scope_->arguments_shadow());
         ASSERT(shadow_ref.is_slot());
         { Reference arguments_ref(this, scope_->arguments());
           ASSERT(arguments_ref.is_slot());
-          // If the newly-allocated arguments object is already on the
-          // stack, we make use of the convenient property that references
-          // representing slots take up no space on the expression stack
-          // (ie, it doesn't matter that the stored value is actually below
-          // the reference).
-          //
-          // If the newly-allocated argument object is not already on
-          // the stack, we rely on the property that loading a
-          // zero-sized reference will not clobber the ecx register.
-          if (!arguments_object_saved) {
-            frame_->Push(ecx);
-          }
+          // Here we rely on the convenient property that references to slots
+          // take up zero space in the frame (ie, it doesn't matter that the
+          // stored value is actually below the reference on the frame).
           arguments_ref.SetValue(NOT_CONST_INIT);
         }
         shadow_ref.SetValue(NOT_CONST_INIT);
       }
-      frame_->Pop();  // Value is no longer needed.
+      frame_->Drop();  // Value is no longer needed.
     }
 
     // Generate code to 'execute' declarations and initialize functions
@@ -356,7 +248,7 @@
     }
 
     if (FLAG_trace) {
-      __ CallRuntime(Runtime::kTraceEnter, 0);
+      frame_->CallRuntime(Runtime::kTraceEnter, 0);
       // Ignore the return value.
     }
     CheckStack();
@@ -371,18 +263,35 @@
       bool should_trace =
           is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
       if (should_trace) {
-        __ CallRuntime(Runtime::kDebugTrace, 0);
+        frame_->CallRuntime(Runtime::kDebugTrace, 0);
         // Ignore the return value.
       }
 #endif
       VisitStatements(body);
 
-      // Generate a return statement if necessary.
-      if (body->is_empty() || body->last()->AsReturnStatement() == NULL) {
+      // Handle the return from the function.
+      if (has_valid_frame()) {
+        // If there is a valid frame, control flow can fall off the end of
+        // the body.  In that case there is an implicit return statement.
+        // Compiling a return statement will jump to the return sequence if
+        // it is already generated or generate it if not.
+        ASSERT(!function_return_is_shadowed_);
         Literal undefined(Factory::undefined_value());
         ReturnStatement statement(&undefined);
         statement.set_statement_pos(fun->end_position());
         VisitReturnStatement(&statement);
+      } else if (function_return_.is_linked()) {
+        // If the return target has dangling jumps to it, then we have not
+        // yet generated the return sequence.  This can happen when (a)
+        // control does not flow off the end of the body so we did not
+        // compile an artificial return statement just above, and (b) there
+        // are return statements in the body but (c) they are all shadowed.
+        //
+        // There is no valid frame here but it is safe (also necessary) to
+        // load the return value into eax.
+        __ mov(eax, Immediate(Factory::undefined_value()));
+        function_return_.Bind();
+        GenerateReturnSequence();
       }
     }
   }
@@ -391,11 +300,23 @@
   loop_nesting_ -= fun->loop_nesting();
 
   // Code generation state must be reset.
-  scope_ = NULL;
-  frame_ = NULL;
-  ASSERT(!has_cc());
   ASSERT(state_ == NULL);
   ASSERT(loop_nesting() == 0);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (HasStackOverflow()) {
+    ClearDeferred();
+  } else {
+    ProcessDeferred();
+  }
+
+  // There is no need to delete the register allocator, it is a
+  // stack-allocated local.
+  allocator_ = NULL;
+  scope_ = NULL;
 }
 
 
@@ -412,10 +333,10 @@
   int index = slot->index();
   switch (slot->type()) {
     case Slot::PARAMETER:
-      return frame_->Parameter(index);
+      return frame_->ParameterAt(index);
 
     case Slot::LOCAL:
-      return frame_->Local(index);
+      return frame_->LocalAt(index);
 
     case Slot::CONTEXT: {
       // Follow the context chain if necessary.
@@ -451,116 +372,154 @@
 
 
 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Register tmp,
-                                                         Label* slow) {
+                                                         Result tmp,
+                                                         JumpTarget* slow) {
   ASSERT(slot->type() == Slot::CONTEXT);
-  int index = slot->index();
-  Register context = esi;
+  ASSERT(tmp.is_register());
+  Result context(esi, this);
+
   for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
         // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-        __ j(not_equal, slow, not_taken);
+        __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
       }
-      __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+      __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
       context = tmp;
     }
   }
   // Check that last extension is NULL.
-  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  __ j(not_equal, slow, not_taken);
-  __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp, index);
+  __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+         Immediate(0));
+  slow->Branch(not_equal, not_taken);
+  __ mov(tmp.reg(), ContextOperand(context.reg(), Context::FCONTEXT_INDEX));
+  return ContextOperand(tmp.reg(), slot->index());
 }
 
 
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
+// Emit code to load the value of an expression to the top of the
+// frame. If the expression is boolean-valued it may be compiled (or
+// partially compiled) into control flow to the control destination.
+// If force_control is true, control flow is forced.
 void CodeGenerator::LoadCondition(Expression* x,
                                   TypeofState typeof_state,
-                                  Label* true_target,
-                                  Label* false_target,
-                                  bool force_cc) {
-  ASSERT(!has_cc());
+                                  ControlDestination* dest,
+                                  bool force_control) {
+  ASSERT(!in_spilled_code());
+  int original_height = frame_->height();
 
-  { CodeGenState new_state(this, typeof_state, true_target, false_target);
+  { CodeGenState new_state(this, typeof_state, dest);
     Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression.  In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        !dest->is_used() &&
+        frame_->height() == original_height) {
+      dest->Goto(true);
+    }
   }
-  if (force_cc && !has_cc()) {
-    // Convert the TOS value to a boolean in the condition code register.
-    // Visiting an expression may possibly choose neither (a) to leave a
-    // value in the condition code register nor (b) to leave a value in TOS
-    // (eg, by compiling to only jumps to the targets).  In that case the
-    // code generated by ToBoolean is wrong because it assumes the value of
-    // the expression in TOS.  So long as there is always a value in TOS or
-    // the condition code register when control falls through to here (there
-    // is), the code generated by ToBoolean is dead and therefore safe.
-    ToBoolean(true_target, false_target);
+
+  if (force_control && !dest->is_used()) {
+    // Convert the TOS value into flow to the control destination.
+    ToBoolean(dest);
   }
-  ASSERT(has_cc() || !force_cc);
+
+  ASSERT(!(force_control && !dest->is_used()));
+  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
-  Label true_target;
-  Label false_target;
-  LoadCondition(x, typeof_state, &true_target, &false_target, false);
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  ASSERT(!in_spilled_code());
+  JumpTarget true_target(this);
+  JumpTarget false_target(this);
+  ControlDestination dest(&true_target, &false_target, true);
+  LoadCondition(x, typeof_state, &dest, false);
 
-  if (has_cc()) {
-    // convert cc_reg_ into a bool
-    Label loaded, materialize_true;
-    __ j(cc_reg_, &materialize_true);
-    frame_->Push(Immediate(Factory::false_value()));
-    __ jmp(&loaded);
-    __ bind(&materialize_true);
-    frame_->Push(Immediate(Factory::true_value()));
-    __ bind(&loaded);
-    cc_reg_ = no_condition;
-  }
-
-  if (true_target.is_linked() || false_target.is_linked()) {
-    // we have at least one condition value
-    // that has been "translated" into a branch,
-    // thus it needs to be loaded explicitly again
-    Label loaded;
-    __ jmp(&loaded);  // don't lose current TOS
-    bool both = true_target.is_linked() && false_target.is_linked();
-    // reincarnate "true", if necessary
+  if (dest.false_was_fall_through()) {
+    // The false target was just bound.
+    JumpTarget loaded(this);
+    frame_->Push(Factory::false_value());
+    // There may be dangling jumps to the true target.
     if (true_target.is_linked()) {
-      __ bind(&true_target);
-      frame_->Push(Immediate(Factory::true_value()));
+      loaded.Jump();
+      true_target.Bind();
+      frame_->Push(Factory::true_value());
+      loaded.Bind();
     }
-    // if both "true" and "false" need to be reincarnated,
-    // jump across code for "false"
-    if (both)
-      __ jmp(&loaded);
-    // reincarnate "false", if necessary
+
+  } else if (dest.is_used()) {
+    // There is true, and possibly false, control flow (with true as
+    // the fall through).
+    JumpTarget loaded(this);
+    frame_->Push(Factory::true_value());
     if (false_target.is_linked()) {
-      __ bind(&false_target);
-      frame_->Push(Immediate(Factory::false_value()));
+      loaded.Jump();
+      false_target.Bind();
+      frame_->Push(Factory::false_value());
+      loaded.Bind();
     }
-    // everything is loaded at this point
-    __ bind(&loaded);
+
+  } else {
+    // We have a valid value on top of the frame, but we still may
+    // have dangling jumps to the true and false targets from nested
+    // subexpressions (eg, the left subexpressions of the
+    // short-circuited boolean operators).
+    ASSERT(has_valid_frame());
+    if (true_target.is_linked() || false_target.is_linked()) {
+      JumpTarget loaded(this);
+      loaded.Jump();  // Don't lose the current TOS.
+      if (true_target.is_linked()) {
+        true_target.Bind();
+        frame_->Push(Factory::true_value());
+        if (false_target.is_linked()) {
+          loaded.Jump();
+        }
+      }
+      if (false_target.is_linked()) {
+        false_target.Bind();
+        frame_->Push(Factory::false_value());
+      }
+      loaded.Bind();
+    }
   }
-  ASSERT(!has_cc());
+
+  ASSERT(has_valid_frame());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::LoadGlobal() {
-  frame_->Push(GlobalObject());
+  if (in_spilled_code()) {
+    frame_->EmitPush(GlobalObject());
+  } else {
+    Result temp = allocator_->Allocate();
+    __ mov(temp.reg(), GlobalObject());
+    frame_->Push(&temp);
+  }
 }
 
 
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  __ mov(scratch, GlobalObject());
-  frame_->Push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
+void CodeGenerator::LoadGlobalReceiver() {
+  Result temp = allocator_->Allocate();
+  Register reg = temp.reg();
+  __ mov(reg, GlobalObject());
+  __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
+  frame_->Push(&temp);
 }
 
 
@@ -597,6 +556,12 @@
 
 
 void CodeGenerator::LoadReference(Reference* ref) {
+  // References are loaded from both spilled and unspilled code.  Set the
+  // state to unspilled to allow that (and explicitly spill after
+  // construction at the construction sites).
+  bool was_in_spilled_code = in_spilled_code_;
+  in_spilled_code_ = false;
+
   Comment cmnt(masm_, "[ LoadReference");
   Expression* e = ref->expression();
   Property* property = e->AsProperty();
@@ -633,23 +598,17 @@
   } else {
     // Anything else is a runtime error.
     Load(e);
-    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
   }
+
+  in_spilled_code_ = was_in_spilled_code;
 }
 
 
 void CodeGenerator::UnloadReference(Reference* ref) {
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
-  int size = ref->size();
-  if (size == 1) {
-    frame_->Pop(eax);
-    __ mov(frame_->Top(), eax);
-  } else if (size > 1) {
-    frame_->Pop(eax);
-    frame_->Drop(size);
-    frame_->Push(eax);
-  }
+  frame_->Nip(ref->size());
 }
 
 
@@ -668,42 +627,41 @@
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
+void CodeGenerator::ToBoolean(ControlDestination* dest) {
   Comment cmnt(masm_, "[ ToBoolean");
 
-  // The value to convert should be popped from the stack.
-  frame_->Pop(eax);
-
+  // The value to convert should be popped from the frame.
+  Result value = frame_->Pop();
+  value.ToRegister();
   // Fast case checks.
 
   // 'false' => false.
-  __ cmp(eax, Factory::false_value());
-  __ j(equal, false_target);
+  __ cmp(value.reg(), Factory::false_value());
+  dest->false_target()->Branch(equal);
 
   // 'true' => true.
-  __ cmp(eax, Factory::true_value());
-  __ j(equal, true_target);
+  __ cmp(value.reg(), Factory::true_value());
+  dest->true_target()->Branch(equal);
 
   // 'undefined' => false.
-  __ cmp(eax, Factory::undefined_value());
-  __ j(equal, false_target);
+  __ cmp(value.reg(), Factory::undefined_value());
+  dest->false_target()->Branch(equal);
 
   // Smi => false iff zero.
   ASSERT(kSmiTag == 0);
-  __ test(eax, Operand(eax));
-  __ j(zero, false_target);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, true_target);
+  __ test(value.reg(), Operand(value.reg()));
+  dest->false_target()->Branch(zero);
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  dest->true_target()->Branch(zero);
 
   // Call the stub for all other cases.
-  frame_->Push(eax);  // Undo the pop(eax) from above.
+  frame_->Push(&value);  // Undo the Pop() from above.
   ToBooleanStub stub;
-  __ CallStub(&stub);
-  // Convert the result (eax) to condition code.
-  __ test(eax, Operand(eax));
-
-  ASSERT(not_equal == not_zero);
-  cc_reg_ = not_equal;
+  Result temp = frame_->CallStub(&stub, 1);
+  // Convert the result to a condition code.
+  __ test(temp.reg(), Operand(temp.reg()));
+  temp.Unuse();
+  dest->Split(not_equal);
 }
 
 
@@ -780,17 +738,17 @@
 
 const char* GenericBinaryOpStub::GetName() {
   switch (op_) {
-  case Token::ADD: return "GenericBinaryOpStub_ADD";
-  case Token::SUB: return "GenericBinaryOpStub_SUB";
-  case Token::MUL: return "GenericBinaryOpStub_MUL";
-  case Token::DIV: return "GenericBinaryOpStub_DIV";
-  case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-  case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-  case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-  case Token::SAR: return "GenericBinaryOpStub_SAR";
-  case Token::SHL: return "GenericBinaryOpStub_SHL";
-  case Token::SHR: return "GenericBinaryOpStub_SHR";
-  default:         return "GenericBinaryOpStub";
+    case Token::ADD: return "GenericBinaryOpStub_ADD";
+    case Token::SUB: return "GenericBinaryOpStub_SUB";
+    case Token::MUL: return "GenericBinaryOpStub_MUL";
+    case Token::DIV: return "GenericBinaryOpStub_DIV";
+    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+    case Token::SAR: return "GenericBinaryOpStub_SAR";
+    case Token::SHL: return "GenericBinaryOpStub_SHL";
+    case Token::SHR: return "GenericBinaryOpStub_SHR";
+    default:         return "GenericBinaryOpStub";
   }
 }
 
@@ -801,27 +759,31 @@
                                 Token::Value op,
                                 OverwriteMode mode,
                                 GenericBinaryFlags flags)
-      : DeferredCode(generator), stub_(op, mode, flags) { }
-
-  void GenerateInlineCode() {
-    stub_.GenerateSmiCode(masm(), enter());
+      : DeferredCode(generator), stub_(op, mode, flags), op_(op) {
+    set_comment("[ DeferredInlineBinaryOperation");
   }
 
-  virtual void Generate() {
-    __ push(ebx);
-    __ CallStub(&stub_);
-    // We must preserve the eax value here, because it will be written
-    // to the top-of-stack element when getting back to the fast case
-    // code. See comment in GenericBinaryOperation where
-    // deferred->exit() is bound.
-    __ push(eax);
-  }
+  Result GenerateInlineCode();
+
+  virtual void Generate();
 
  private:
   GenericBinaryOpStub stub_;
+  Token::Value op_;
 };
 
 
+void DeferredInlineBinaryOperation::Generate() {
+  Result left(generator());
+  Result right(generator());
+  enter()->Bind(&left, &right);
+  generator()->frame()->Push(&left);
+  generator()->frame()->Push(&right);
+  Result answer = generator()->frame()->CallStub(&stub_, 2);
+  exit_.Jump(&answer);
+}
+
+
 void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                            StaticType* type,
                                            OverwriteMode overwrite_mode) {
@@ -830,9 +792,7 @@
 
   if (op == Token::COMMA) {
     // Simply discard left value.
-    frame_->Pop(eax);
-    frame_->Pop();
-    frame_->Push(eax);
+    frame_->Nip(1);
     return;
   }
 
@@ -865,170 +825,219 @@
     // Create a new deferred code for the slow-case part.
     DeferredInlineBinaryOperation* deferred =
         new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags);
-    // Fetch the operands from the stack.
-    frame_->Pop(ebx);  // get y
-    __ mov(eax, frame_->Top());  // get x
     // Generate the inline part of the code.
-    deferred->GenerateInlineCode();
-    // Put result back on the stack. It seems somewhat weird to let
-    // the deferred code jump back before the assignment to the frame
-    // top, but this is just to let the peephole optimizer get rid of
-    // more code.
-    __ bind(deferred->exit());
-    __ mov(frame_->Top(), eax);
+    // The operands are on the frame.
+    Result answer = deferred->GenerateInlineCode();
+    deferred->BindExit(&answer);
+    frame_->Push(&answer);
   } else {
     // Call the stub and push the result to the stack.
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
-    __ CallStub(&stub);
-    frame_->Push(eax);
+    Result answer = frame_->CallStub(&stub, 2);
+    frame_->Push(&answer);
   }
 }
 
 
-class DeferredInlinedSmiOperation: public DeferredCode {
+class DeferredInlineSmiOperation: public DeferredCode {
  public:
-  DeferredInlinedSmiOperation(CodeGenerator* generator,
-                              Token::Value op, int value,
-                              OverwriteMode overwrite_mode) :
-      DeferredCode(generator), op_(op), value_(value),
-      overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiOperation");
+  DeferredInlineSmiOperation(CodeGenerator* generator,
+                             Token::Value op,
+                             Smi* value,
+                             OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        op_(op),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperation");
   }
-  virtual void Generate() {
-    __ push(eax);
-    __ push(Immediate(Smi::FromInt(value_)));
-    GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+
+  virtual void Generate();
 
  private:
   Token::Value op_;
-  int value_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-class DeferredInlinedSmiOperationReversed: public DeferredCode {
+void DeferredInlineSmiOperation::Generate() {
+  Result left(generator());
+  enter()->Bind(&left);
+  generator()->frame()->Push(&left);
+  generator()->frame()->Push(value_);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiOperationReversed: public DeferredCode {
  public:
-  DeferredInlinedSmiOperationReversed(CodeGenerator* generator,
-                                      Token::Value op, int value,
-                                      OverwriteMode overwrite_mode) :
-      DeferredCode(generator), op_(op), value_(value),
-      overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiOperationReversed");
+  DeferredInlineSmiOperationReversed(CodeGenerator* generator,
+                                     Token::Value op,
+                                     Smi* value,
+                                     OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        op_(op),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperationReversed");
   }
-  virtual void Generate() {
-    __ push(Immediate(Smi::FromInt(value_)));
-    __ push(eax);
-    GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+
+  virtual void Generate();
 
  private:
   Token::Value op_;
-  int value_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-class DeferredInlinedSmiAdd: public DeferredCode {
+void DeferredInlineSmiOperationReversed::Generate() {
+  Result right(generator());
+  enter()->Bind(&right);
+  generator()->frame()->Push(value_);
+  generator()->frame()->Push(&right);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiAdd: public DeferredCode {
  public:
-  DeferredInlinedSmiAdd(CodeGenerator* generator, int value,
-                        OverwriteMode overwrite_mode) :
-      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiAdd");
+  DeferredInlineSmiAdd(CodeGenerator* generator,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAdd");
   }
 
-  virtual void Generate() {
-    // Undo the optimistic add operation and call the shared stub.
-    Immediate immediate(Smi::FromInt(value_));
-    __ sub(Operand(eax), immediate);
-    __ push(eax);
-    __ push(immediate);
-    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+  virtual void Generate();
 
  private:
-  int value_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-class DeferredInlinedSmiAddReversed: public DeferredCode {
+void DeferredInlineSmiAdd::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  Result left(generator());  // Initially left + value_.
+  enter()->Bind(&left);
+  left.ToRegister();
+  generator()->frame()->Spill(left.reg());
+  __ sub(Operand(left.reg()), Immediate(value_));
+  generator()->frame()->Push(&left);
+  generator()->frame()->Push(value_);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiAddReversed: public DeferredCode {
  public:
-  DeferredInlinedSmiAddReversed(CodeGenerator* generator, int value,
-                        OverwriteMode overwrite_mode) :
-      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiAddReversed");
+  DeferredInlineSmiAddReversed(CodeGenerator* generator,
+                               Smi* value,
+                               OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiAddReversed");
   }
 
-  virtual void Generate() {
-    // Undo the optimistic add operation and call the shared stub.
-    Immediate immediate(Smi::FromInt(value_));
-    __ sub(Operand(eax), immediate);
-    __ push(immediate);
-    __ push(eax);
-    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+  virtual void Generate();
 
  private:
-  int value_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-class DeferredInlinedSmiSub: public DeferredCode {
+void DeferredInlineSmiAddReversed::Generate() {
+  // Undo the optimistic add operation and call the shared stub.
+  Result right(generator());  // Initially value_ + right.
+  enter()->Bind(&right);
+  right.ToRegister();
+  generator()->frame()->Spill(right.reg());
+  __ sub(Operand(right.reg()), Immediate(value_));
+  generator()->frame()->Push(value_);
+  generator()->frame()->Push(&right);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiSub: public DeferredCode {
  public:
-  DeferredInlinedSmiSub(CodeGenerator* generator, int value,
-                        OverwriteMode overwrite_mode) :
-      DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiSub");
+  DeferredInlineSmiSub(CodeGenerator* generator,
+                       Smi* value,
+                       OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiSub");
   }
 
-  virtual void Generate() {
-    // Undo the optimistic sub operation and call the shared stub.
-    Immediate immediate(Smi::FromInt(value_));
-    __ add(Operand(eax), immediate);
-    __ push(eax);
-    __ push(immediate);
-    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+  virtual void Generate();
 
  private:
-  int value_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
-class DeferredInlinedSmiSubReversed: public DeferredCode {
+void DeferredInlineSmiSub::Generate() {
+  // Undo the optimistic sub operation and call the shared stub.
+  Result left(generator());  // Initially left - value_.
+  enter()->Bind(&left);
+  left.ToRegister();
+  generator()->frame()->Spill(left.reg());
+  __ add(Operand(left.reg()), Immediate(value_));
+  generator()->frame()->Push(&left);
+  generator()->frame()->Push(value_);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
+class DeferredInlineSmiSubReversed: public DeferredCode {
  public:
-  // tos_reg is used to save the TOS value before reversing the operands
-  // eax will contain the immediate value after undoing the optimistic sub.
-  DeferredInlinedSmiSubReversed(CodeGenerator* generator, Register tos_reg,
-                                OverwriteMode overwrite_mode) :
-      DeferredCode(generator), tos_reg_(tos_reg),
-      overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlinedSmiSubReversed");
+  DeferredInlineSmiSubReversed(CodeGenerator* generator,
+                               Smi* value,
+                               OverwriteMode overwrite_mode)
+      : DeferredCode(generator),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiSubReversed");
   }
 
-  virtual void Generate() {
-    // Undo the optimistic sub operation and call the shared stub.
-    __ add(eax, Operand(tos_reg_));
-    __ push(eax);
-    __ push(tos_reg_);
-    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-    __ CallStub(&igostub);
-  }
+  virtual void Generate();
 
  private:
-  Register tos_reg_;
+  Smi* value_;
   OverwriteMode overwrite_mode_;
 };
 
 
+void DeferredInlineSmiSubReversed::Generate() {
+  // Call the shared stub.
+  Result right(generator());
+  enter()->Bind(&right);
+  generator()->frame()->Push(value_);
+  generator()->frame()->Push(&right);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
+  Result answer = generator()->frame()->CallStub(&igostub, 2);
+  exit_.Jump(&answer);
+}
+
+
 void CodeGenerator::SmiOperation(Token::Value op,
                                  StaticType* type,
                                  Handle<Object> value,
@@ -1045,125 +1054,163 @@
   // smi literal (multiply by 2, shift by 0, etc.).
 
   // Get the literal value.
-  int int_value = Smi::cast(*value)->value();
+  Smi* smi_value = Smi::cast(*value);
+  int int_value = smi_value->value();
   ASSERT(is_intn(int_value, kMaxSmiInlinedBits));
 
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred = NULL;
       if (!reversed) {
-        deferred = new DeferredInlinedSmiAdd(this, int_value, overwrite_mode);
+        deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
       } else {
-        deferred = new DeferredInlinedSmiAddReversed(this, int_value,
-                                                     overwrite_mode);
+        deferred = new DeferredInlineSmiAddReversed(this, smi_value,
+                                                    overwrite_mode);
       }
-      frame_->Pop(eax);
-      __ add(Operand(eax), Immediate(value));
-      __ j(overflow, deferred->enter(), not_taken);
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, deferred->enter(), not_taken);
-      __ bind(deferred->exit());
-      frame_->Push(eax);
+      Result operand = frame_->Pop();
+      operand.ToRegister();
+      frame_->Spill(operand.reg());
+      __ add(Operand(operand.reg()), Immediate(value));
+      deferred->enter()->Branch(overflow, &operand, not_taken);
+      __ test(operand.reg(), Immediate(kSmiTagMask));
+      deferred->enter()->Branch(not_zero, &operand, not_taken);
+      deferred->BindExit(&operand);
+      frame_->Push(&operand);
       break;
     }
 
     case Token::SUB: {
       DeferredCode* deferred = NULL;
-      frame_->Pop(eax);
+      Result operand = frame_->Pop();
+      Result answer(this);  // Only allocated a new register if reversed.
       if (!reversed) {
-        deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode);
-        __ sub(Operand(eax), Immediate(value));
+        operand.ToRegister();
+        frame_->Spill(operand.reg());
+        deferred = new DeferredInlineSmiSub(this,
+                                            smi_value,
+                                            overwrite_mode);
+        __ sub(Operand(operand.reg()), Immediate(value));
+        answer = operand;
       } else {
-        deferred = new DeferredInlinedSmiSubReversed(this, edx, overwrite_mode);
-        __ mov(edx, Operand(eax));
-        __ mov(eax, Immediate(value));
-        __ sub(eax, Operand(edx));
+        answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        deferred = new DeferredInlineSmiSubReversed(this,
+                                                    smi_value,
+                                                    overwrite_mode);
+        __ mov(answer.reg(), Immediate(value));
+        if (operand.is_register()) {
+          __ sub(answer.reg(), Operand(operand.reg()));
+        } else {
+          ASSERT(operand.is_constant());
+          __ sub(Operand(answer.reg()), Immediate(operand.handle()));
+        }
       }
-      __ j(overflow, deferred->enter(), not_taken);
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, deferred->enter(), not_taken);
-      __ bind(deferred->exit());
-      frame_->Push(eax);
+      deferred->enter()->Branch(overflow, &operand, not_taken);
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      deferred->enter()->Branch(not_zero, &operand, not_taken);
+      operand.Unuse();
+      deferred->BindExit(&answer);
+      frame_->Push(&answer);
       break;
     }
 
     case Token::SAR: {
       if (reversed) {
-        frame_->Pop(eax);
-        frame_->Push(Immediate(value));
-        frame_->Push(eax);
+        Result top = frame_->Pop();
+        frame_->Push(value);
+        frame_->Push(&top);
         GenericBinaryOperation(op, type, overwrite_mode);
       } else {
-        int shift_value = int_value & 0x1f;  // only least significant 5 bits
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
         DeferredCode* deferred =
-          new DeferredInlinedSmiOperation(this, Token::SAR, shift_value,
-                                          overwrite_mode);
-        frame_->Pop(eax);
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(not_zero, deferred->enter(), not_taken);
-        __ sar(eax, shift_value);
-        __ and_(eax, ~kSmiTagMask);
-        __ bind(deferred->exit());
-        frame_->Push(eax);
+            new DeferredInlineSmiOperation(this, Token::SAR, smi_value,
+                                           overwrite_mode);
+        Result result = frame_->Pop();
+        result.ToRegister();
+        __ test(result.reg(), Immediate(kSmiTagMask));
+        deferred->enter()->Branch(not_zero, &result, not_taken);
+        frame_->Spill(result.reg());
+        __ sar(result.reg(), shift_value);
+        __ and_(result.reg(), ~kSmiTagMask);
+        deferred->BindExit(&result);
+        frame_->Push(&result);
       }
       break;
     }
 
     case Token::SHR: {
       if (reversed) {
-        frame_->Pop(eax);
-        frame_->Push(Immediate(value));
-        frame_->Push(eax);
+        Result top = frame_->Pop();
+        frame_->Push(value);
+        frame_->Push(&top);
         GenericBinaryOperation(op, type, overwrite_mode);
       } else {
-        int shift_value = int_value & 0x1f;  // only least significant 5 bits
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
         DeferredCode* deferred =
-        new DeferredInlinedSmiOperation(this, Token::SHR, shift_value,
-                                        overwrite_mode);
-        frame_->Pop(eax);
-        __ test(eax, Immediate(kSmiTagMask));
-        __ mov(ebx, Operand(eax));
-        __ j(not_zero, deferred->enter(), not_taken);
-        __ sar(ebx, kSmiTagSize);
-        __ shr(ebx, shift_value);
-        __ test(ebx, Immediate(0xc0000000));
-        __ j(not_zero, deferred->enter(), not_taken);
-        // tag result and store it in TOS (eax)
-        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-        __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
-        __ bind(deferred->exit());
-        frame_->Push(eax);
+            new DeferredInlineSmiOperation(this, Token::SHR, smi_value,
+                                           overwrite_mode);
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        deferred->enter()->Branch(not_zero, &operand, not_taken);
+        Result answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        __ mov(answer.reg(), Operand(operand.reg()));
+        __ sar(answer.reg(), kSmiTagSize);
+        __ shr(answer.reg(), shift_value);
+        // A negative Smi shifted right two is in the positive Smi range.
+        if (shift_value < 2) {
+          __ test(answer.reg(), Immediate(0xc0000000));
+          deferred->enter()->Branch(not_zero, &operand, not_taken);
+        }
+        operand.Unuse();
+        ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
+        __ lea(answer.reg(),
+               Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+        deferred->BindExit(&answer);
+        frame_->Push(&answer);
       }
       break;
     }
 
     case Token::SHL: {
       if (reversed) {
-        frame_->Pop(eax);
-        frame_->Push(Immediate(value));
-        frame_->Push(eax);
+        Result top = frame_->Pop();
+        frame_->Push(value);
+        frame_->Push(&top);
         GenericBinaryOperation(op, type, overwrite_mode);
       } else {
-        int shift_value = int_value & 0x1f;  // only least significant 5 bits
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
         DeferredCode* deferred =
-        new DeferredInlinedSmiOperation(this, Token::SHL, shift_value,
-                                        overwrite_mode);
-        frame_->Pop(eax);
-        __ test(eax, Immediate(kSmiTagMask));
-        __ mov(ebx, Operand(eax));
-        __ j(not_zero, deferred->enter(), not_taken);
-        __ sar(ebx, kSmiTagSize);
-        __ shl(ebx, shift_value);
-        // This is the Smi check for the shifted result.
-        // After signed subtraction of 0xc0000000, the valid
-        // Smis are positive.
-        __ cmp(ebx, 0xc0000000);
-        __ j(sign, deferred->enter(), not_taken);
-        // Tag the result and store it on top of the frame.
-        ASSERT(kSmiTagSize == times_2);  // Adjust the code if not true.
-        __ lea(eax, Operand(ebx, ebx, times_1, kSmiTag));
-        __ bind(deferred->exit());
-        frame_->Push(eax);
+            new DeferredInlineSmiOperation(this, Token::SHL, smi_value,
+                                           overwrite_mode);
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        deferred->enter()->Branch(not_zero, &operand, not_taken);
+        Result answer = allocator()->Allocate();
+        ASSERT(answer.is_valid());
+        __ mov(answer.reg(), Operand(operand.reg()));
+        ASSERT(kSmiTag == 0);  // adjust code if not the case
+        // We do no shifts, only the Smi conversion, if shift_value is 1.
+        if (shift_value == 0) {
+          __ sar(answer.reg(), kSmiTagSize);
+        } else if (shift_value > 1) {
+          __ shl(answer.reg(), shift_value - 1);
+        }
+        // Convert int result to Smi, checking that it is in int range.
+        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+        __ add(answer.reg(), Operand(answer.reg()));
+        deferred->enter()->Branch(overflow, &operand, not_taken);
+        operand.Unuse();
+        deferred->BindExit(&answer);
+        frame_->Push(&answer);
       }
       break;
     }
@@ -1173,43 +1220,45 @@
     case Token::BIT_AND: {
       DeferredCode* deferred = NULL;
       if (!reversed) {
-        deferred =  new DeferredInlinedSmiOperation(this, op, int_value,
-                                                    overwrite_mode);
+        deferred =  new DeferredInlineSmiOperation(this, op, smi_value,
+                                                   overwrite_mode);
       } else {
-        deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value,
-                                                           overwrite_mode);
+        deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value,
+                                                          overwrite_mode);
       }
-      frame_->Pop(eax);
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, deferred->enter(), not_taken);
+      Result operand = frame_->Pop();
+      operand.ToRegister();
+      __ test(operand.reg(), Immediate(kSmiTagMask));
+      deferred->enter()->Branch(not_zero, &operand, not_taken);
+      frame_->Spill(operand.reg());
       if (op == Token::BIT_AND) {
         if (int_value == 0) {
-          __ xor_(Operand(eax), eax);
+          __ xor_(Operand(operand.reg()), operand.reg());
         } else {
-          __ and_(Operand(eax), Immediate(value));
+          __ and_(Operand(operand.reg()), Immediate(value));
         }
       } else if (op == Token::BIT_XOR) {
         if (int_value != 0) {
-          __ xor_(Operand(eax), Immediate(value));
+          __ xor_(Operand(operand.reg()), Immediate(value));
         }
       } else {
         ASSERT(op == Token::BIT_OR);
         if (int_value != 0) {
-          __ or_(Operand(eax), Immediate(value));
+          __ or_(Operand(operand.reg()), Immediate(value));
         }
       }
-      __ bind(deferred->exit());
-      frame_->Push(eax);
+      deferred->BindExit(&operand);
+      frame_->Push(&operand);
       break;
     }
 
     default: {
       if (!reversed) {
-        frame_->Push(Immediate(value));
+        frame_->Push(value);
       } else {
-        frame_->Pop(eax);
-        frame_->Push(Immediate(value));
-        frame_->Push(eax);
+        Result top = frame_->Pop();
+        frame_->Push(value);
+        frame_->Push(&top);
       }
       GenericBinaryOperation(op, type, overwrite_mode);
       break;
@@ -1246,95 +1295,189 @@
 };
 
 
-void CodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc,
+                               bool strict,
+                               ControlDestination* dest) {
   // Strict only makes sense for equality comparisons.
   ASSERT(!strict || cc == equal);
 
+  Result left_side(this);
+  Result right_side(this);
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   if (cc == greater || cc == less_equal) {
     cc = ReverseCondition(cc);
-    frame_->Pop(edx);
-    frame_->Pop(eax);
+    left_side = frame_->Pop();
+    right_side = frame_->Pop();
   } else {
-    frame_->Pop(eax);
-    frame_->Pop(edx);
+    right_side = frame_->Pop();
+    left_side = frame_->Pop();
   }
+  ASSERT(cc == less || cc == equal || cc == greater_equal);
 
-  // Check for the smi case.
-  Label is_smi, done;
-  __ mov(ecx, Operand(eax));
-  __ or_(ecx, Operand(edx));
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &is_smi, taken);
+  // If either side is a constant smi, optimize the comparison.
+  bool left_side_constant_smi =
+      left_side.is_constant() && left_side.handle()->IsSmi();
+  bool right_side_constant_smi =
+      right_side.is_constant() && right_side.handle()->IsSmi();
+  bool left_side_constant_null =
+      left_side.is_constant() && left_side.handle()->IsNull();
+  bool right_side_constant_null =
+      right_side.is_constant() && right_side.handle()->IsNull();
 
-  // When non-smi, call out to the compare stub.  "parameters" setup by
-  // calling code in edx and eax and "result" is returned in the flags.
-  CompareStub stub(cc, strict);
-  __ CallStub(&stub);
-  if (cc == equal) {
-    __ test(eax, Operand(eax));
-  } else {
-    __ cmp(eax, 0);
+  if (left_side_constant_smi || right_side_constant_smi) {
+    if (left_side_constant_smi && right_side_constant_smi) {
+      // Trivial case, comparing two constants.
+      int left_value = Smi::cast(*left_side.handle())->value();
+      int right_value = Smi::cast(*right_side.handle())->value();
+      switch (cc) {
+        case less:
+          dest->Goto(left_value < right_value);
+          break;
+        case equal:
+          dest->Goto(left_value == right_value);
+          break;
+        case greater_equal:
+          dest->Goto(left_value >= right_value);
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {  // Only one side is a constant Smi.
+      // If left side is a constant Smi, reverse the operands.
+      // Since one side is a constant Smi, conversion order does not matter.
+      if (left_side_constant_smi) {
+        Result temp = left_side;
+        left_side = right_side;
+        right_side = temp;
+        cc = ReverseCondition(cc);
+        // This may reintroduce greater or less_equal as the value of cc.
+        // CompareStub and the inline code both support all values of cc.
+      }
+      // Implement comparison against a constant Smi, inlining the case
+      // where both sides are Smis.
+      left_side.ToRegister();
+      ASSERT(left_side.is_valid());
+      JumpTarget is_smi(this);
+      __ test(left_side.reg(), Immediate(kSmiTagMask));
+      is_smi.Branch(zero, &left_side, &right_side, taken);
+
+      // Set up and call the compare stub, which expects arguments in
+      // edx and eax.
+      CompareStub stub(cc, strict);
+      left_side.ToRegister(edx);  // Only left_side currently uses a register.
+      right_side.ToRegister(eax);  // left_side is not in eax.  eax is free.
+      Result result = frame_->CallStub(&stub, &left_side, &right_side, 0);
+      result.ToRegister();
+      __ cmp(result.reg(), 0);
+      result.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_smi.Bind(&left_side, &right_side);
+      left_side.ToRegister();
+      // Test smi equality and comparison by signed int comparison.
+      if (IsUnsafeSmi(right_side.handle())) {
+        right_side.ToRegister();
+        ASSERT(right_side.is_valid());
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+      } else {
+        __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
+      }
+      left_side.Unuse();
+      right_side.Unuse();
+      dest->Split(cc);
+    }
+  } else if (cc == equal &&
+             (left_side_constant_null || right_side_constant_null)) {
+    // To make null checks efficient, we check if either the left side or
+    // the right side is the constant 'null'.
+    // If so, we optimize the code by inlining a null check instead of
+    // calling the (very) general runtime routine for checking equality.
+    Result operand = left_side_constant_null ? right_side : left_side;
+    right_side.Unuse();
+    left_side.Unuse();
+    operand.ToRegister();
+    __ cmp(operand.reg(), Factory::null_value());
+    if (strict) {
+      operand.Unuse();
+      dest->Split(equal);
+    } else {
+      // The 'null' value is only equal to 'undefined' if using non-strict
+      // comparisons.
+      dest->true_target()->Branch(equal);
+      __ cmp(operand.reg(), Factory::undefined_value());
+      dest->true_target()->Branch(equal);
+      __ test(operand.reg(), Immediate(kSmiTagMask));
+      dest->false_target()->Branch(equal);
+
+      // It can be an undetectable object.
+      // Use a scratch register in preference to spilling operand.reg().
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(),
+             FieldOperand(operand.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      temp.Unuse();
+      operand.Unuse();
+      dest->Split(not_zero);
+    }
+  } else {  // Neither side is a constant Smi or null.
+    // If either side is a non-smi constant, skip the smi check.
+    bool known_non_smi =
+        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
+        (right_side.is_constant() && !right_side.handle()->IsSmi());
+    left_side.ToRegister();
+    right_side.ToRegister();
+    JumpTarget is_smi(this);
+    if (!known_non_smi) {
+      // Check for the smi case.
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), left_side.reg());
+      __ or_(temp.reg(), Operand(right_side.reg()));
+      __ test(temp.reg(), Immediate(kSmiTagMask));
+      temp.Unuse();
+      is_smi.Branch(zero, &left_side, &right_side, taken);
+    }
+    // When non-smi, call out to the compare stub.  "parameters" set up
+    // by calling code in edx and eax and "result" is returned in the flags.
+    if (!left_side.reg().is(eax)) {
+      right_side.ToRegister(eax);
+      left_side.ToRegister(edx);
+    } else if (!right_side.reg().is(edx)) {
+      left_side.ToRegister(edx);
+      right_side.ToRegister(eax);
+    } else {
+      frame_->Spill(eax);  // Can be multiply referenced, even now.
+      frame_->Spill(edx);
+      __ xchg(eax, edx);
+      // If left_side and right_side become real (non-dummy) arguments
+      // to CallStub, they need to be swapped in this case.
+    }
+    CompareStub stub(cc, strict);
+    Result answer = frame_->CallStub(&stub, &right_side, &left_side, 0);
+    if (cc == equal) {
+      __ test(answer.reg(), Operand(answer.reg()));
+    } else {
+      __ cmp(answer.reg(), 0);
+    }
+    answer.Unuse();
+    if (known_non_smi) {
+      dest->Split(cc);
+    } else {
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+      is_smi.Bind(&left_side, &right_side);
+      left_side.ToRegister();
+      right_side.ToRegister();
+      __ cmp(left_side.reg(), Operand(right_side.reg()));
+      right_side.Unuse();
+      left_side.Unuse();
+      dest->Split(cc);
+    }
   }
-  __ jmp(&done);
-
-  // Test smi equality by pointer comparison.
-  __ bind(&is_smi);
-  __ cmp(edx, Operand(eax));
-  // Fall through to |done|.
-
-  __ bind(&done);
-  cc_reg_ = cc;
-}
-
-
-class SmiComparisonDeferred: public DeferredCode {
- public:
-  SmiComparisonDeferred(CodeGenerator* generator,
-                        Condition cc,
-                        bool strict,
-                        int value)
-      : DeferredCode(generator), cc_(cc), strict_(strict), value_(value) {
-    set_comment("[ ComparisonDeferred");
-  }
-  virtual void Generate();
-
- private:
-  Condition cc_;
-  bool strict_;
-  int value_;
-};
-
-
-void SmiComparisonDeferred::Generate() {
-  CompareStub stub(cc_, strict_);
-  // Setup parameters and call stub.
-  __ mov(edx, Operand(eax));
-  __ Set(eax, Immediate(Smi::FromInt(value_)));
-  __ CallStub(&stub);
-  __ cmp(eax, 0);
-  // "result" is returned in the flags
-}
-
-
-void CodeGenerator::SmiComparison(Condition cc,
-                                      Handle<Object> value,
-                                      bool strict) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  int int_value = Smi::cast(*value)->value();
-  ASSERT(is_intn(int_value, kMaxSmiInlinedBits));
-
-  SmiComparisonDeferred* deferred =
-      new SmiComparisonDeferred(this, cc, strict, int_value);
-  frame_->Pop(eax);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, deferred->enter(), not_taken);
-  // Test smi equality by pointer comparison.
-  __ cmp(Operand(eax), Immediate(value));
-  __ bind(deferred->exit());
-  cc_reg_ = cc;
 }
 
 
@@ -1361,7 +1504,8 @@
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                       int position) {
   // Push the arguments ("left-to-right") on the stack.
-  for (int i = 0; i < args->length(); i++) {
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
   }
 
@@ -1369,58 +1513,84 @@
   CodeForSourcePosition(position);
 
   // Use the shared code stub to call the function.
-  CallFunctionStub call_function(args->length());
-  __ CallStub(&call_function);
-
-  // Restore context and pop function from the stack.
-  __ mov(esi, frame_->Context());
-  __ mov(frame_->Top(), eax);
+  CallFunctionStub call_function(arg_count);
+  Result answer = frame_->CallStub(&call_function, arg_count + 1);
+  // Restore context and replace function on the stack with the
+  // result of the stub invocation.
+  frame_->RestoreContextRegister();
+  frame_->SetElementAt(0, &answer);
 }
 
 
-void CodeGenerator::Branch(bool if_true, Label* L) {
-  ASSERT(has_cc());
-  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  __ j(cc, L);
-  cc_reg_ = no_condition;
+class DeferredStackCheck: public DeferredCode {
+ public:
+  explicit DeferredStackCheck(CodeGenerator* generator)
+      : DeferredCode(generator) {
+    set_comment("[ DeferredStackCheck");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredStackCheck::Generate() {
+  enter()->Bind();
+  StackCheckStub stub;
+  Result ignored = generator()->frame()->CallStub(&stub, 0);
+  ignored.Unuse();
+  exit_.Jump();
 }
 
 
 void CodeGenerator::CheckStack() {
   if (FLAG_check_stack) {
-    Label stack_is_ok;
-    StackCheckStub stub;
+    DeferredStackCheck* deferred = new DeferredStackCheck(this);
     ExternalReference stack_guard_limit =
         ExternalReference::address_of_stack_guard_limit();
     __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
-    __ j(above_equal, &stack_is_ok, taken);
-    __ CallStub(&stub);
-    __ bind(&stack_is_ok);
+    deferred->enter()->Branch(below, not_taken);
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  ASSERT(!in_spilled_code());
+  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
+    Visit(statements->at(i));
   }
 }
 
 
 void CodeGenerator::VisitBlock(Block* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ Block");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
   VisitStatements(node->statements());
-  __ bind(node->break_target());
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
 }
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  frame_->Push(Immediate(pairs));
-  frame_->Push(esi);
-  frame_->Push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  frame_->Push(pairs);
+
+  // Duplicate the context register.
+  Result context(esi, this);
+  frame_->Push(&context);
+
+  frame_->Push(Smi::FromInt(is_eval() ? 1 : 0));
+  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
 
 void CodeGenerator::VisitDeclaration(Declaration* node) {
   Comment cmnt(masm_, "[ Declaration");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
   Slot* slot = var->slot();
@@ -1432,25 +1602,26 @@
     // Variables with a "LOOKUP" slot were introduced as non-locals
     // during variable resolution and must have mode DYNAMIC.
     ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.
-    frame_->Push(esi);
-    frame_->Push(Immediate(var->name()));
+    // For now, just do a runtime call.  Duplicate the context register.
+    Result context(esi, this);
+    frame_->Push(&context);
+    frame_->Push(var->name());
     // Declaration nodes are always introduced in one of two modes.
     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->Push(Immediate(Smi::FromInt(attr)));
+    frame_->Push(Smi::FromInt(attr));
     // Push initial value, if any.
     // Note: For variables we must not push an initial value (such as
     // 'undefined') because we may have a (legal) redeclaration and we
     // must not destroy the current value.
     if (node->mode() == Variable::CONST) {
-      frame_->Push(Immediate(Factory::the_hole_value()));
+      frame_->Push(Factory::the_hole_value());
     } else if (node->fun() != NULL) {
       Load(node->fun());
     } else {
-      frame_->Push(Immediate(0));  // no initial value!
+      frame_->Push(Smi::FromInt(0));  // no initial value!
     }
-    __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
     return;
   }
@@ -1467,7 +1638,7 @@
 
   if (val != NULL) {
     {
-      // Set initial value.
+      // Set the initial value.
       Reference target(this, node->proxy());
       Load(val);
       target.SetValue(NOT_CONST_INIT);
@@ -1475,197 +1646,262 @@
       // it goes out of scope.
     }
     // Get rid of the assigned value (declarations are statements).
-    frame_->Pop();
+    frame_->Drop();
   }
 }
 
 
 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   Expression* expression = node->expression();
   expression->MarkAsStatement();
   Load(expression);
   // Remove the lingering expression result from the top of stack.
-  frame_->Pop();
+  frame_->Drop();
 }
 
 
 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   // nothing to do
 }
 
 
 void CodeGenerator::VisitIfStatement(IfStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which
-  // parts of the if statement are present or not.
+  // Generate different code depending on which parts of the if statement
+  // are present or not.
   bool has_then_stm = node->HasThenStatement();
   bool has_else_stm = node->HasElseStatement();
 
-  CodeForStatement(node);
-  Label exit;
+  CodeForStatementPosition(node);
+  JumpTarget exit(this);
   if (has_then_stm && has_else_stm) {
-    Label then;
-    Label else_;
-    // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
-    Branch(false, &else_);
-    // then
-    __ bind(&then);
-    Visit(node->then_statement());
-    __ jmp(&exit);
-    // else
-    __ bind(&else_);
-    Visit(node->else_statement());
+    JumpTarget then(this);
+    JumpTarget else_(this);
+    ControlDestination dest(&then, &else_, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The else target was bound, so we compile the else part first.
+      Visit(node->else_statement());
+
+      // We may have dangling jumps to the then part.
+      if (then.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then target was bound, so we compile the then part first.
+      Visit(node->then_statement());
+
+      if (else_.is_linked()) {
+        if (has_valid_frame()) exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    }
 
   } else if (has_then_stm) {
     ASSERT(!has_else_stm);
-    Label then;
-    // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
-    Branch(false, &exit);
-    // then
-    __ bind(&then);
-    Visit(node->then_statement());
+    JumpTarget then(this);
+    ControlDestination dest(&then, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.false_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // then part.
+      if (then.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        then.Bind();
+        Visit(node->then_statement());
+      }
+    } else {
+      // The then label was bound.
+      Visit(node->then_statement());
+    }
 
   } else if (has_else_stm) {
     ASSERT(!has_then_stm);
-    Label else_;
-    // if (!cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
-    Branch(true, &exit);
-    // else
-    __ bind(&else_);
-    Visit(node->else_statement());
+    JumpTarget else_(this);
+    ControlDestination dest(&exit, &else_, false);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+    if (dest.true_was_fall_through()) {
+      // The exit label was bound.  We may have dangling jumps to the
+      // else part.
+      if (else_.is_linked()) {
+        exit.Unuse();
+        exit.Jump();
+        else_.Bind();
+        Visit(node->else_statement());
+      }
+    } else {
+      // The else label was bound.
+      Visit(node->else_statement());
+    }
 
   } else {
     ASSERT(!has_then_stm && !has_else_stm);
-    // if (cond)
-    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
-    if (has_cc()) {
-      cc_reg_ = no_condition;
-    } else {
-      // No cc value set up, that means the boolean was pushed.
-      // Pop it again, since it is not going to be used.
-      frame_->Pop();
+    // We only care about the condition's side effects (not its value
+    // or control flow effect).  LoadCondition is called without
+    // forcing control flow.
+    ControlDestination dest(&exit, &exit, true);
+    LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+    if (!dest.is_used()) {
+      // We got a value on the frame rather than (or in addition to)
+      // control flow.
+      frame_->Drop();
     }
   }
 
-  // end
-  __ bind(&exit);
-}
-
-
-void CodeGenerator::CleanStack(int num_bytes) {
-  ASSERT(num_bytes % kPointerSize == 0);
-  frame_->Drop(num_bytes / kPointerSize);
+  if (exit.is_linked()) {
+    exit.Bind();
+  }
 }
 
 
 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatement(node);
-  CleanStack(break_stack_height_ - node->target()->break_stack_height());
-  __ jmp(node->target()->continue_target());
+  CodeForStatementPosition(node);
+  node->target()->continue_target()->Jump();
 }
 
 
 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatement(node);
-  CleanStack(break_stack_height_ - node->target()->break_stack_height());
-  __ jmp(node->target()->break_target());
+  CodeForStatementPosition(node);
+  node->target()->break_target()->Jump();
 }
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ReturnStatement");
-  CodeForStatement(node);
-  Load(node->expression());
 
-  // Move the function result into eax
-  frame_->Pop(eax);
-
-  // If we're inside a try statement or the return instruction
-  // sequence has been generated, we just jump to that
-  // point. Otherwise, we generate the return instruction sequence and
-  // bind the function return label.
-  if (is_inside_try_ || function_return_.is_bound()) {
-    __ jmp(&function_return_);
+  if (function_return_is_shadowed_) {
+    // If the function return is shadowed, we spill all information
+    // and just jump to the label.
+    VirtualFrame::SpilledScope spilled_scope(this);
+    CodeForStatementPosition(node);
+    LoadAndSpill(node->expression());
+    frame_->EmitPop(eax);
+    function_return_.Jump();
   } else {
-    __ bind(&function_return_);
-    if (FLAG_trace) {
-      frame_->Push(eax);  // undo the pop(eax) from above
-      __ CallRuntime(Runtime::kTraceExit, 1);
+    // Load the returned value.
+    CodeForStatementPosition(node);
+    Load(node->expression());
+
+    // Pop the result from the frame and prepare the frame for
+    // returning thus making it easier to merge.
+    Result result = frame_->Pop();
+    frame_->PrepareForReturn();
+
+    // Move the result into register eax where it belongs.
+    result.ToRegister(eax);
+    // TODO(203): Instead of explicitly calling Unuse on the result, it
+    // might be better to pass the result to Jump and Bind below.
+    result.Unuse();
+
+    // If the function return label is already bound, we reuse the
+    // code by jumping to the return site.
+    if (function_return_.is_bound()) {
+      function_return_.Jump();
+    } else {
+      function_return_.Bind();
+      GenerateReturnSequence();
     }
-
-    // Add a label for checking the size of the code used for returning.
-    Label check_exit_codesize;
-    __ bind(&check_exit_codesize);
-
-    // Leave the frame and return popping the arguments and the
-    // receiver.
-    frame_->Exit();
-    __ ret((scope_->num_parameters() + 1) * kPointerSize);
-
-    // Check that the size of the code used for returning matches what is
-    // expected by the debugger.
-    ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
-              __ SizeOfCodeGeneratedSince(&check_exit_codesize));
   }
 }
 
 
+void CodeGenerator::GenerateReturnSequence() {
+  // The return value is a live (but not currently reference counted)
+  // reference to eax.  This is safe because the current frame does not
+  // contain a reference to eax (it is prepared for the return by spilling
+  // all registers).
+  ASSERT(has_valid_frame());
+  if (FLAG_trace) {
+    frame_->Push(eax);  // Materialize result on the stack.
+    frame_->CallRuntime(Runtime::kTraceExit, 1);
+  }
+
+  // Add a label for checking the size of the code used for returning.
+  Label check_exit_codesize;
+  __ bind(&check_exit_codesize);
+
+  // Leave the frame and return popping the arguments and the
+  // receiver.
+  frame_->Exit();
+  __ ret((scope_->num_parameters() + 1) * kPointerSize);
+  DeleteFrame();
+
+  // Check that the size of the code used for returning matches what is
+  // expected by the debugger.
+  ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+            __ SizeOfCodeGeneratedSince(&check_exit_codesize));
+}
+
+
 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   Load(node->expression());
+  Result context(this);
   if (node->is_catch_block()) {
-    __ CallRuntime(Runtime::kPushCatchContext, 1);
+    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
   } else {
-    __ CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  if (kDebug) {
-    Label verified_true;
-    // Verify eax and esi are the same in debug mode
-    __ cmp(eax, Operand(esi));
-    __ j(equal, &verified_true);
-    __ int3();
-    __ bind(&verified_true);
+    context = frame_->CallRuntime(Runtime::kPushContext, 1);
   }
 
   // Update context local.
-  __ mov(frame_->Context(), esi);
+  frame_->SaveContextRegister();
+
+  // Verify that the runtime call result and esi agree.
+  if (FLAG_debug_code) {
+    __ cmp(context.reg(), Operand(esi));
+    __ Assert(equal, "Runtime::NewContext should end up in esi");
+  }
 }
 
 
 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
   // Pop context.
   __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
   // Update context local.
-  __ mov(frame_->Context(), esi);
+  frame_->SaveContextRegister();
 }
 
+
 int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
     return kFastSwitchMaxOverheadFactor;
 }
 
+
 int CodeGenerator::FastCaseSwitchMinCaseCount() {
     return kFastSwitchMinCaseCount;
 }
 
+
 // Generate a computed jump to a switch case.
 void CodeGenerator::GenerateFastCaseSwitchJumpTable(
     SwitchStatement* node,
     int min_index,
     int range,
-    Label* fail_label,
+    Label* default_label,
     Vector<Label*> case_targets,
     Vector<Label> case_labels) {
   // Notice: Internal references, used by both the jmp instruction and
@@ -1676,129 +1912,245 @@
   // placeholders, and fill in the addresses after the labels have been
   // bound.
 
-  frame_->Pop(eax);  // supposed Smi
-  // check range of value, if outside [0..length-1] jump to default/end label.
+  JumpTarget setup_default(this);
+  JumpTarget is_smi(this);
+
+  // A non-null default label pointer indicates a default case among
+  // the case labels.  Otherwise we use the break target as a
+  // "default".
+  JumpTarget* default_target =
+      (default_label == NULL) ? node->break_target() : &setup_default;
+
+  // Test whether input is a smi.
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  Result switch_value = frame_->Pop();
+  switch_value.ToRegister();
+  __ test(switch_value.reg(), Immediate(kSmiTagMask));
+  is_smi.Branch(equal, &switch_value, taken);
 
-  // Test whether input is a HeapNumber that is really a Smi
-  Label is_smi;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(equal, &is_smi);
-  // It's a heap object, not a Smi or a Failure
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ebx, HEAP_NUMBER_TYPE);
-  __ j(not_equal, fail_label);
-  // eax points to a heap number.
-  __ push(eax);
-  __ CallRuntime(Runtime::kNumberToSmi, 1);
-  __ bind(&is_smi);
+  // It's a heap object, not a smi or a failure.  Check if it is a
+  // heap number.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ CmpObjectType(switch_value.reg(), HEAP_NUMBER_TYPE, temp.reg());
+  temp.Unuse();
+  default_target->Branch(not_equal);
 
+  // The switch value is a heap number.  Convert it to a smi.
+  frame_->Push(&switch_value);
+  Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);
+
+  is_smi.Bind(&smi_value);
+  smi_value.ToRegister();
+  // Convert the switch value to a 0-based table index.
   if (min_index != 0) {
-    __ sub(Operand(eax), Immediate(min_index << kSmiTagSize));
+    frame_->Spill(smi_value.reg());
+    __ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
   }
-  __ test(eax, Immediate(0x80000000 | kSmiTagMask));  // negative or not Smi
-  __ j(not_equal, fail_label, not_taken);
-  __ cmp(eax, range << kSmiTagSize);
-  __ j(greater_equal, fail_label, not_taken);
+  // Go to the default case if the table index is negative or not a smi.
+  __ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
+  default_target->Branch(not_equal, not_taken);
+  __ cmp(smi_value.reg(), range << kSmiTagSize);
+  default_target->Branch(greater_equal, not_taken);
+
+  // The expected frame at all the case labels is a version of the
+  // current one (the bidirectional entry frame, which an arbitrary
+  // frame of the correct height can be merged to).  Keep a copy to
+  // restore at the start of every label.  Create a jump target and
+  // bind it to set its entry frame properly.
+  JumpTarget entry_target(this, JumpTarget::BIDIRECTIONAL);
+  entry_target.Bind(&smi_value);
+  VirtualFrame* start_frame = new VirtualFrame(frame_);
 
   // 0 is placeholder.
-  __ jmp(Operand(eax, eax, times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
-  // calculate address to overwrite later with actual address of table.
+  // Jump to the address at table_address + 2 * smi_value.reg().
+  // The target of the jump is read from table_address + 4 * switch_value.
+  // The Smi encoding of smi_value.reg() is 2 * switch_value.
+  smi_value.ToRegister();
+  __ jmp(Operand(smi_value.reg(), smi_value.reg(),
+                 times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
+  smi_value.Unuse();
+  // Calculate address to overwrite later with actual address of table.
   int32_t jump_table_ref = __ pc_offset() - sizeof(int32_t);
-
   __ Align(4);
   Label table_start;
   __ bind(&table_start);
   __ WriteInternalReference(jump_table_ref, table_start);
 
   for (int i = 0; i < range; i++) {
-    // table entry, 0 is placeholder for case address
+    // These are the table entries. 0x0 is the placeholder for case address.
     __ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
   }
 
-  GenerateFastCaseSwitchCases(node, case_labels);
+  GenerateFastCaseSwitchCases(node, case_labels, start_frame);
+
+  // If there was a default case, we need to emit the code to match it.
+  if (default_label != NULL) {
+    if (has_valid_frame()) {
+      node->break_target()->Jump();
+    }
+    setup_default.Bind();
+    frame_->MergeTo(start_frame);
+    __ jmp(default_label);
+    DeleteFrame();
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
 
   for (int i = 0, entry_pos = table_start.pos();
-       i < range; i++, entry_pos += sizeof(uint32_t)) {
-    __ WriteInternalReference(entry_pos, *case_targets[i]);
+       i < range;
+       i++, entry_pos += sizeof(uint32_t)) {
+    if (case_targets[i] == NULL) {
+      __ WriteInternalReference(entry_pos,
+                                *node->break_target()->entry_label());
+    } else {
+      __ WriteInternalReference(entry_pos, *case_targets[i]);
+    }
   }
+
+  delete start_frame;
 }
 
 
 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
 
+  // Compile the switch value.
   Load(node->tag());
 
   if (TryGenerateFastCaseSwitchStatement(node)) {
     return;
   }
 
-  Label next, fall_through, default_case;
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
+  CaseClause* default_clause = NULL;
 
-  for (int i = 0; i < length; i++) {
+  JumpTarget next_test(this);
+  // Compile the case label expressions and comparisons.  Exit early
+  // if a comparison is unconditionally true.  The target next_test is
+  // bound before the loop in order to indicate control flow to the
+  // first comparison.
+  next_test.Bind();
+  for (int i = 0; i < length && !next_test.is_unused(); i++) {
     CaseClause* clause = cases->at(i);
-    Comment cmnt(masm_, "[ case clause");
-
+    clause->body_target()->Initialize(this);
+    // The default is not a test, but remember it for later.
     if (clause->is_default()) {
-      // Continue matching cases. The program will execute the default case's
-      // statements if it does not match any of the cases.
-      __ jmp(&next);
-
-      // Bind the default case label, so we can branch to it when we
-      // have compared against all other cases.
-      ASSERT(default_case.is_unused());  // at most one default clause
-      __ bind(&default_case);
-    } else {
-      __ bind(&next);
-      next.Unuse();
-      __ mov(eax, frame_->Top());
-      frame_->Push(eax);  // duplicate TOS
-      Load(clause->label());
-      Comparison(equal, true);
-      Branch(false, &next);
+      default_clause = clause;
+      continue;
     }
 
-    // Entering the case statement for the first time. Remove the switch value
-    // from the stack.
-    frame_->Pop(eax);
+    Comment cmnt(masm_, "[ Case comparison");
+    // We recycle the same target next_test for each test.  Bind it if
+    // the previous test has not done so and then unuse it for the
+    // loop.
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    next_test.Unuse();
 
-    // Generate code for the body.
-    // This is also the target for the fall through from the previous case's
-    // statements which has to skip over the matching code and the popping of
-    // the switch value.
-    __ bind(&fall_through);
-    fall_through.Unuse();
-    VisitStatements(clause->statements());
-    __ jmp(&fall_through);
+    // Duplicate the switch value.
+    frame_->Dup();
+
+    // Compile the label expression.
+    Load(clause->label());
+
+    // Compare and branch to the body if true or the next test if
+    // false.  Prefer the next test as a fall through.
+    ControlDestination dest(clause->body_target(), &next_test, false);
+    Comparison(equal, true, &dest);
+
+    // If the comparison fell through to the true target, jump to the
+    // actual body.
+    if (dest.true_was_fall_through()) {
+      clause->body_target()->Unuse();
+      clause->body_target()->Jump();
+    }
   }
 
-  __ bind(&next);
-  // Reached the end of the case statements without matching any of the cases.
-  if (default_case.is_bound()) {
-    // A default case exists -> execute its statements.
-    __ jmp(&default_case);
-  } else {
-    // Remove the switch value from the stack.
-    frame_->Pop();
+  // If there was control flow to a next test from the last one
+  // compiled, compile a jump to the default or break target.
+  if (!next_test.is_unused()) {
+    if (next_test.is_linked()) {
+      next_test.Bind();
+    }
+    // Drop the switch value.
+    frame_->Drop();
+    if (default_clause != NULL) {
+      default_clause->body_target()->Jump();
+    } else {
+      node->break_target()->Jump();
+    }
   }
 
-  __ bind(&fall_through);
-  __ bind(node->break_target());
+
+  // The last instruction emitted was a jump, either to the default
+  // clause or the break target, or else to a case body from the loop
+  // that compiles the tests.
+  ASSERT(!has_valid_frame());
+  // Compile case bodies as needed.
+  for (int i = 0; i < length; i++) {
+    CaseClause* clause = cases->at(i);
+
+    // There are two ways to reach the body: from the corresponding
+    // test or as the fall through of the previous body.
+    if (clause->body_target()->is_linked() || has_valid_frame()) {
+      if (clause->body_target()->is_linked()) {
+        if (has_valid_frame()) {
+          // If we have both a jump to the test and a fall through, put
+          // a jump on the fall through path to avoid the dropping of
+          // the switch value on the test path.  The exception is the
+          // default which has already had the switch value dropped.
+          if (clause->is_default()) {
+            clause->body_target()->Bind();
+          } else {
+            JumpTarget body(this);
+            body.Jump();
+            clause->body_target()->Bind();
+            frame_->Drop();
+            body.Bind();
+          }
+        } else {
+          // No fall through to worry about.
+          clause->body_target()->Bind();
+          if (!clause->is_default()) {
+            frame_->Drop();
+          }
+        }
+      } else {
+        // Otherwise, we have only fall through.
+        ASSERT(has_valid_frame());
+      }
+
+      // We are now prepared to compile the body.
+      Comment cmnt(masm_, "[ Case body");
+      VisitStatements(clause->statements());
+    }
+    clause->body_target()->Unuse();
+  }
+
+  // We may not have a valid frame here so bind the break target only
+  // if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  node->break_target()->Unuse();
 }
 
 
 void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ LoopStatement");
-  CodeForStatement(node);
-  node->set_break_stack_height(break_stack_height_);
+  CodeForStatementPosition(node);
+  node->break_target()->Initialize(this);
 
-  // simple condition analysis
+  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+  // known result for the test expression, with no side effects.
   enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
   if (node->cond() == NULL) {
     ASSERT(node->type() == LoopStatement::FOR_LOOP);
@@ -1814,83 +2166,330 @@
     }
   }
 
-  Label loop, entry;
+  switch (node->type()) {
+    case LoopStatement::DO_LOOP: {
+      JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+      IncrementLoopNesting();
 
-  // init
-  if (node->init() != NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    Visit(node->init());
-  }
-  if (node->type() != LoopStatement::DO_LOOP && info != ALWAYS_TRUE) {
-    __ jmp(&entry);
-  }
+      // Label the top of the loop for the backward jump if necessary.
+      if (info == ALWAYS_TRUE) {
+        // Use the continue target.
+        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else if (info == ALWAYS_FALSE) {
+        // No need to label it.
+        node->continue_target()->Initialize(this);
+      } else {
+        // Continue is the test, so use the backward body target.
+        ASSERT(info == DONT_KNOW);
+        node->continue_target()->Initialize(this);
+        body.Bind();
+      }
 
-  IncrementLoopNesting();
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
 
-  // body
-  __ bind(&loop);
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
+      // Compile the test.
+      if (info == ALWAYS_TRUE) {
+        // If control flow can fall off the end of the body, jump back
+        // to the top and bind the break target at the exit.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
 
-  // next
-  __ bind(node->continue_target());
-  if (node->next() != NULL) {
-    // Record source position of the statement as this code which is after the
-    // code for the body actually belongs to the loop statement and not the
-    // body.
-    CodeForStatement(node);
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    Visit(node->next());
-  }
+      } else if (info == ALWAYS_FALSE) {
+        // We may have had continues or breaks in the body.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
 
-  // cond
-  __ bind(&entry);
-  switch (info) {
-    case ALWAYS_TRUE:
-      __ jmp(&loop);
+      } else {
+        ASSERT(info == DONT_KNOW);
+        // We have to compile the test expression if it can be reached by
+        // control flow falling out of the body or via continue.
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+        if (has_valid_frame()) {
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+        if (node->break_target()->is_linked()) {
+          node->break_target()->Bind();
+        }
+      }
       break;
-    case ALWAYS_FALSE:
+    }
+
+    case LoopStatement::WHILE_LOOP: {
+      // TODO(260): This flag controls whether to duplicate the test
+      // at the bottom of the loop.  Replace it with a better
+      // indication of when it is safe to do so.
+      static const bool test_at_bottom = false;
+
+      JumpTarget body(this);  // Initialized as forward-only.
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything.
+      if (info == ALWAYS_FALSE) break;
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop with the continue target.
+        node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // Continue is the test at the bottom, no need to label the
+          // test at the top.  The body is a backward target.
+          node->continue_target()->Initialize(this);
+          body.make_bidirectional();
+        } else {
+          // Label the test at the top as the continue target.  The
+          // body is a forward-only target.
+          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        }
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        // The loop body has been labeled with the continue target.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          // If we have chosen to recompile the test at the bottom,
+          // then it is the continue target.
+          if (node->continue_target()->is_linked()) {
+            node->continue_target()->Bind();
+          }
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a backward
+            // jump from here and thus an invalid fall-through).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // If we have chosen not to recompile the test at the
+          // bottom, jump back to the one at the top.
+          if (has_valid_frame()) {
+            node->continue_target()->Jump();
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
       break;
-    case DONT_KNOW:
-      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &loop,
-                    node->break_target(), true);
-      Branch(true, &loop);
+    }
+
+    case LoopStatement::FOR_LOOP: {
+      // TODO(260): This flag controls whether to duplicate the test
+      // at the bottom of the loop.  Replace it with a better
+      // indication of when it is safe to do so.
+      static const bool test_at_bottom = false;
+
+      JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+      JumpTarget body(this);
+
+      // Compile the init expression if present.
+      if (node->init() != NULL) {
+        Visit(node->init());
+      }
+
+      IncrementLoopNesting();
+
+      // If the condition is always false and has no side effects, we
+      // do not need to compile anything else.
+      if (info == ALWAYS_FALSE) break;
+
+      // Based on the condition analysis, compile the test as necessary.
+      if (info == ALWAYS_TRUE) {
+        // We will not compile the test expression.  Label the top of
+        // the loop.
+        if (node->next() == NULL) {
+          // Use the continue target if there is no update expression.
+          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // Otherwise use the backward loop target.
+          node->continue_target()->Initialize(this);
+          loop.Bind();
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);
+        if (test_at_bottom) {
+          // Continue is either the update expression or the test at
+          // the bottom, no need to label the test at the top.
+          node->continue_target()->Initialize(this);
+        } else if (node->next() == NULL) {
+          // We are not recompiling the test at the bottom and there
+          // is no update expression.
+          node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+          node->continue_target()->Bind();
+        } else {
+          // We are not recompiling the test at the bottom and there
+          // is an update expression.
+          node->continue_target()->Initialize(this);
+          loop.Bind();
+        }
+
+        // Compile the test with the body as the true target and
+        // preferred fall-through and with the break target as the
+        // false target.
+        ControlDestination dest(&body, node->break_target(), true);
+        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+        if (dest.false_was_fall_through()) {
+          // If we got the break target as fall-through, the test may
+          // have been unconditionally false (if there are no jumps to
+          // the body).
+          if (!body.is_linked()) break;
+
+          // Otherwise, jump around the body on the fall through and
+          // then bind the body target.
+          node->break_target()->Unuse();
+          node->break_target()->Jump();
+          body.Bind();
+        }
+      }
+
+      CheckStack();  // TODO(1222600): ignore if body contains calls.
+      Visit(node->body());
+
+      // If there is an update expression, compile it if necessary.
+      if (node->next() != NULL) {
+        if (node->continue_target()->is_linked()) {
+          node->continue_target()->Bind();
+        }
+
+        // Control can reach the update by falling out of the body or
+        // by a continue.
+        if (has_valid_frame()) {
+          // Record the source position of the statement as this code
+          // which is after the code for the body actually belongs to
+          // the loop statement and not the body.
+          CodeForStatementPosition(node);
+          Visit(node->next());
+        }
+      }
+
+      // Based on the condition analysis, compile the backward jump as
+      // necessary.
+      if (info == ALWAYS_TRUE) {
+        if (has_valid_frame()) {
+          if (node->next() == NULL) {
+            node->continue_target()->Jump();
+          } else {
+            loop.Jump();
+          }
+        }
+      } else {
+        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
+        if (test_at_bottom) {
+          if (node->continue_target()->is_linked()) {
+            // We can have dangling jumps to the continue target if
+            // there was no update expression.
+            node->continue_target()->Bind();
+          }
+          // Control can reach the test at the bottom by falling out
+          // of the body, by a continue in the body, or from the
+          // update expression.
+          if (has_valid_frame()) {
+            // The break target is the fall-through (body is a
+            // backward jump from here).
+            ControlDestination dest(&body, node->break_target(), false);
+            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+          }
+        } else {
+          // Otherwise, jump back to the test at the top.
+          if (has_valid_frame()) {
+            if (node->next() == NULL) {
+              node->continue_target()->Jump();
+            } else {
+              loop.Jump();
+            }
+          }
+        }
+      }
+
+      // The break target may be already bound (by the condition), or
+      // there may not be a valid frame.  Bind it only if needed.
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
       break;
+    }
   }
 
   DecrementLoopNesting();
-
-  // exit
-  __ bind(node->break_target());
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
 }
 
 
 void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  // We keep stuff on the stack while the body is executing.
-  // Record it, so that a break/continue crossing this statement
-  // can restore the stack.
-  const int kForInStackSize = 5 * kPointerSize;
-  break_stack_height_ += kForInStackSize;
-  node->set_break_stack_height(break_stack_height_);
-
-  Label loop, next, entry, cleanup, exit, primitive, jsobject;
-  Label end_del_check, fixed_array;
+  JumpTarget primitive(this);
+  JumpTarget jsobject(this);
+  JumpTarget fixed_array(this);
+  JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
+  JumpTarget end_del_check(this);
+  JumpTarget exit(this);
 
   // Get the object to enumerate over (converted to JSObject).
-  Load(node->enumerable());
+  LoadAndSpill(node->enumerable());
 
   // Both SpiderMonkey and kjs ignore null and undefined in contrast
   // to the specification.  12.6.4 mandates a call to ToObject.
-  frame_->Pop(eax);
+  frame_->EmitPop(eax);
 
   // eax: value to be iterated over
   __ cmp(eax, Factory::undefined_value());
-  __ j(equal, &exit);
+  exit.Branch(equal);
   __ cmp(eax, Factory::null_value());
-  __ j(equal, &exit);
+  exit.Branch(equal);
 
   // Stack layout in body:
   // [iteration counter (smi)] <- slot 0
@@ -1902,26 +2501,24 @@
   // Check if enumerable is already a JSObject
   // eax: value to be iterated over
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &primitive);
+  primitive.Branch(zero);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(above_equal, &jsobject);
+  jsobject.Branch(above_equal);
 
-  __ bind(&primitive);
-  frame_->Push(eax);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  primitive.Bind();
+  frame_->EmitPush(eax);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
   // function call returns the value in eax, which is where we want it below
 
-
-  __ bind(&jsobject);
-
+  jsobject.Bind();
   // Get the set of properties (as a FixedArray or Map).
   // eax: value to be iterated over
-  frame_->Push(eax);  // push the object being iterated over (slot 4)
+  frame_->EmitPush(eax);  // push the object being iterated over (slot 4)
 
-  frame_->Push(eax);  // push the Object (slot 4) for the runtime call
-  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
+  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
 
   // If we got a Map, we can do a fast modification check.
   // Otherwise, we got a FixedArray, and we have to do a slow check.
@@ -1930,7 +2527,7 @@
   __ mov(edx, Operand(eax));
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
   __ cmp(ecx, Factory::meta_map());
-  __ j(not_equal, &fixed_array);
+  fixed_array.Branch(not_equal);
 
   // Get enum cache
   // eax: map (result from call to Runtime::kGetPropertyNamesFast)
@@ -1941,85 +2538,75 @@
   // Get the cache from the bridge array.
   __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  frame_->Push(eax);  // <- slot 3
-  frame_->Push(edx);  // <- slot 2
+  frame_->EmitPush(eax);  // <- slot 3
+  frame_->EmitPush(edx);  // <- slot 2
   __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
   __ shl(eax, kSmiTagSize);
-  frame_->Push(eax);  // <- slot 1
-  frame_->Push(Immediate(Smi::FromInt(0)));  // <- slot 0
-  __ jmp(&entry);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  entry.Jump();
 
-
-  __ bind(&fixed_array);
-
+  fixed_array.Bind();
   // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->Push(Immediate(Smi::FromInt(0)));  // <- slot 3
-  frame_->Push(eax);  // <- slot 2
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(eax);  // <- slot 2
 
   // Push the length of the array and the initial index onto the stack.
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ shl(eax, kSmiTagSize);
-  frame_->Push(eax);  // <- slot 1
-  frame_->Push(Immediate(Smi::FromInt(0)));  // <- slot 0
-  __ jmp(&entry);
-
-  // Body.
-  __ bind(&loop);
-  Visit(node->body());
-
-  // Next.
-  __ bind(node->continue_target());
-  __ bind(&next);
-  frame_->Pop(eax);
-  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
-  frame_->Push(eax);
+  frame_->EmitPush(eax);  // <- slot 1
+  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
 
   // Condition.
-  __ bind(&entry);
+  entry.Bind();
+  // Grab the current frame's height for the break and continue
+  // targets only after all the state is pushed on the frame.
+  node->break_target()->Initialize(this);
+  node->continue_target()->Initialize(this);
 
-  __ mov(eax, frame_->Element(0));  // load the current count
-  __ cmp(eax, frame_->Element(1));  // compare to the array length
-  __ j(above_equal, &cleanup);
+  __ mov(eax, frame_->ElementAt(0));  // load the current count
+  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
+  node->break_target()->Branch(above_equal);
 
   // Get the i'th entry of the array.
-  __ mov(edx, frame_->Element(2));
+  __ mov(edx, frame_->ElementAt(2));
   __ mov(ebx, Operand(edx, eax, times_2,
                       FixedArray::kHeaderSize - kHeapObjectTag));
 
   // Get the expected map from the stack or a zero map in the
   // permanent slow case eax: current iteration count ebx: i'th entry
   // of the enum cache
-  __ mov(edx, frame_->Element(3));
+  __ mov(edx, frame_->ElementAt(3));
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
   // eax: current iteration count
   // ebx: i'th entry of the enum cache
   // edx: expected map value
-  __ mov(ecx, frame_->Element(4));
+  __ mov(ecx, frame_->ElementAt(4));
   __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ cmp(ecx, Operand(edx));
-  __ j(equal, &end_del_check);
+  end_del_check.Branch(equal);
 
   // Convert the entry to a string (or null if it isn't a property anymore).
-  frame_->Push(frame_->Element(4));  // push enumerable
-  frame_->Push(ebx);  // push entry
-  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
+  frame_->EmitPush(ebx);  // push entry
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
   __ mov(ebx, Operand(eax));
 
   // If the property has been removed while iterating, we just skip it.
   __ cmp(ebx, Factory::null_value());
-  __ j(equal, &next);
+  node->continue_target()->Branch(equal);
 
-
-  __ bind(&end_del_check);
-
-  // Store the entry in the 'each' expression and take another spin in the loop.
-  // edx: i'th entry of the enum cache (or string there of)
-  frame_->Push(ebx);
+  end_del_check.Bind();
+  // Store the entry in the 'each' expression and take another spin in the
+  // loop.  edx: i'th entry of the enum cache (or string thereof)
+  frame_->EmitPush(ebx);
   { Reference each(this, node->each());
+    // Loading a reference may leave the frame in an unspilled state.
+    frame_->SpillAll();
     if (!each.is_illegal()) {
       if (each.size() > 0) {
-        frame_->Push(frame_->Element(each.size()));
+        frame_->EmitPush(frame_->ElementAt(each.size()));
       }
       // If the reference was to a slot we rely on the convenient property
       // that it doesn't matter whether a value (eg, ebx pushed above) is
@@ -2031,37 +2618,52 @@
         // ie, now the topmost value of the non-zero sized reference), since
         // we will discard the top of stack after unloading the reference
         // anyway.
-        frame_->Pop();
+        frame_->Drop();
       }
     }
   }
+  // Unloading a reference may leave the frame in an unspilled state.
+  frame_->SpillAll();
+
   // Discard the i'th entry pushed above or else the remainder of the
   // reference, whichever is currently on top of the stack.
-  frame_->Pop();
+  frame_->Drop();
+
+  // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
-  __ jmp(&loop);
+  VisitAndSpill(node->body());
+
+  // Next.
+  node->continue_target()->Bind();
+  frame_->EmitPop(eax);
+  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+  frame_->EmitPush(eax);
+  entry.Jump();
 
   // Cleanup.
-  __ bind(&cleanup);
-  __ bind(node->break_target());
+  node->break_target()->Bind();
   frame_->Drop(5);
 
   // Exit.
-  __ bind(&exit);
+  exit.Bind();
 
-  break_stack_height_ -= kForInStackSize;
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
 }
 
 
 void CodeGenerator::VisitTryCatch(TryCatch* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ TryCatch");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  Label try_block, exit;
+  JumpTarget try_block(this);
+  JumpTarget exit(this);
 
-  __ call(&try_block);
+  try_block.Call();
   // --- Catch block ---
-  frame_->Push(eax);
+  frame_->EmitPush(eax);
 
   // Store the caught exception in the catch variable.
   { Reference ref(this, node->catch_var());
@@ -2073,46 +2675,54 @@
   }
 
   // Remove the exception from the stack.
-  frame_->Pop();
+  frame_->Drop();
 
-  VisitStatements(node->catch_block()->statements());
-  __ jmp(&exit);
+  VisitStatementsAndSpill(node->catch_block()->statements());
+  if (has_valid_frame()) {
+    exit.Jump();
+  }
 
 
   // --- Try block ---
-  __ bind(&try_block);
+  try_block.Bind();
 
-  __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
-  // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
-  frame_->Push(eax);  //
+  frame_->PushTryHandler(TRY_CATCH_HANDLER);
+  int handler_height = frame_->height();
 
-  // Shadow the labels for all escapes from the try block, including
-  // returns.  During shadowing, the original label is hidden as the
-  // LabelShadow and operations on the original actually affect the
-  // shadowing label.
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
   //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_labels()->length();
-  List<LabelShadow*> shadows(1 + nof_escapes);
-  shadows.Add(new LabelShadow(&function_return_));
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
   for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   }
 
   // Generate code for the statements in the try block.
-  { TempAssign<bool> temp(&is_inside_try_, true);
-    VisitStatements(node->try_block()->statements());
-  }
+  VisitStatementsAndSpill(node->try_block()->statements());
 
   // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
-  int nof_unlinks = 0;
-  for (int i = 0; i <= nof_escapes; i++) {
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
+  bool has_unlinks = false;
+  for (int i = 0; i < shadows.length(); i++) {
     shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
+    has_unlinks = has_unlinks || shadows[i]->is_linked();
   }
+  function_return_is_shadowed_ = function_return_was_shadowed;
 
   // Get an external reference to the handler address.
   ExternalReference handler_address(Top::k_handler_address);
@@ -2126,19 +2736,27 @@
     __ Assert(equal, "stack pointer should point to top handler");
   }
 
-  // Unlink from try chain.
-  frame_->Pop(eax);
-  __ mov(Operand::StaticVariable(handler_address), eax);  // TOS == next_sp
-  frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-  // next_sp popped.
-  if (nof_unlinks > 0) __ jmp(&exit);
+  // If we can fall off the end of the try block, unlink from try chain.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+    if (has_unlinks) {
+      exit.Jump();
+    }
+  }
 
-  // Generate unlink code for the (formerly) shadowing labels that have been
-  // jumped to.
-  for (int i = 0; i <= nof_escapes; i++) {
+  // Generate unlink code for the (formerly) shadowing targets that have been
+  // jumped to.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
     if (shadows[i]->is_linked()) {
       // Unlink from try chain; be careful not to destroy the TOS.
-      __ bind(shadows[i]);
+      shadows[i]->Bind();
+      // Because we can be jumping here (to spilled code) from unspilled
+      // code, we need to reestablish a spilled frame at this block.
+      frame_->SpillAll();
 
       // Reload sp from the top handler, because some statements that we
       // break from (eg, for...in) may have left stuff on the stack.
@@ -2146,159 +2764,210 @@
       const int kNextOffset = StackHandlerConstants::kNextOffset +
           StackHandlerConstants::kAddressDisplacement;
       __ lea(esp, Operand(edx, kNextOffset));
+      frame_->Forget(frame_->height() - handler_height);
 
-      frame_->Pop(Operand::StaticVariable(handler_address));
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
       // next_sp popped.
-      __ jmp(shadows[i]->original_label());
+
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        frame_->PrepareForReturn();
+      }
+      shadows[i]->other_target()->Jump();
     }
+    delete shadows[i];
   }
 
-  __ bind(&exit);
+  exit.Bind();
 }
 
 
 void CodeGenerator::VisitTryFinally(TryFinally* node) {
+  ASSERT(!in_spilled_code());
+  VirtualFrame::SpilledScope spilled_scope(this);
   Comment cmnt(masm_, "[ TryFinally");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
   // block. Should probably be extended to hold information for
   // break/continue from within the try block.
   enum { FALLING, THROWING, JUMPING };
 
-  Label exit, unlink, try_block, finally_block;
+  JumpTarget try_block(this);
+  JumpTarget finally_block(this);
 
-  __ call(&try_block);
+  try_block.Call();
 
-  frame_->Push(eax);
+  frame_->EmitPush(eax);
   // In case of thrown exceptions, this is where we continue.
   __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
-  __ jmp(&finally_block);
-
+  finally_block.Jump();
 
   // --- Try block ---
-  __ bind(&try_block);
+  try_block.Bind();
 
-  __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
-  // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
-  frame_->Push(eax);
+  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+  int handler_height = frame_->height();
 
-  // Shadow the labels for all escapes from the try block, including
-  // returns.  During shadowing, the original label is hidden as the
-  // LabelShadow and operations on the original actually affect the
-  // shadowing label.
+  // Shadow the jump targets for all escapes from the try block, including
+  // returns.  During shadowing, the original target is hidden as the
+  // ShadowTarget and operations on the original actually affect the
+  // shadowing target.
   //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_labels()->length();
-  List<LabelShadow*> shadows(1 + nof_escapes);
-  shadows.Add(new LabelShadow(&function_return_));
+  // We should probably try to unify the escaping targets and the return
+  // target.
+  int nof_escapes = node->escaping_targets()->length();
+  List<ShadowTarget*> shadows(1 + nof_escapes);
+
+  // Add the shadow target for the function return.
+  static const int kReturnShadowIndex = 0;
+  shadows.Add(new ShadowTarget(&function_return_));
+  bool function_return_was_shadowed = function_return_is_shadowed_;
+  function_return_is_shadowed_ = true;
+  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+  // Add the remaining shadow targets.
   for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new LabelShadow(node->escaping_labels()->at(i)));
+    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
   }
 
   // Generate code for the statements in the try block.
-  { TempAssign<bool> temp(&is_inside_try_, true);
-    VisitStatements(node->try_block()->statements());
-  }
+  VisitStatementsAndSpill(node->try_block()->statements());
 
   // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
+  // After shadowing stops, the original targets are unshadowed and the
+  // ShadowTargets represent the formerly shadowing targets.
   int nof_unlinks = 0;
-  for (int i = 0; i <= nof_escapes; i++) {
+  for (int i = 0; i < shadows.length(); i++) {
     shadows[i]->StopShadowing();
     if (shadows[i]->is_linked()) nof_unlinks++;
   }
+  function_return_is_shadowed_ = function_return_was_shadowed;
 
-  // Set the state on the stack to FALLING.
-  frame_->Push(Immediate(Factory::undefined_value()));  // fake TOS
-  __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
-  if (nof_unlinks > 0) __ jmp(&unlink);
+  // Get an external reference to the handler address.
+  ExternalReference handler_address(Top::k_handler_address);
 
-  // Generate code to set the state for the (formerly) shadowing labels that
-  // have been jumped to.
-  for (int i = 0; i <= nof_escapes; i++) {
+  // If we can fall off the end of the try block, unlink from the try
+  // chain and set the state on the frame to FALLING.
+  if (has_valid_frame()) {
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(eax);
+    __ mov(Operand::StaticVariable(handler_address), eax);
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+    // Fake a top of stack value (unneeded when FALLING) and set the
+    // state in ecx, then jump around the unlink blocks if any.
+    frame_->EmitPush(Immediate(Factory::undefined_value()));
+    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
+    if (nof_unlinks > 0) {
+      finally_block.Jump();
+    }
+  }
+
+  // Generate code to unlink and set the state for the (formerly)
+  // shadowing targets that have been jumped to.
+  for (int i = 0; i < shadows.length(); i++) {
     if (shadows[i]->is_linked()) {
-      __ bind(shadows[i]);
-      if (shadows[i]->original_label() == &function_return_) {
-        // If this label shadowed the function return, materialize the
-        // return value on the stack.
-        frame_->Push(eax);
+      // If we have come from the shadowed return, the return value is
+      // in (a non-refcounted reference to) eax.  We must preserve it
+      // until it is pushed.
+      //
+      // Because we can be jumping here (to spilled code) from
+      // unspilled code, we need to reestablish a spilled frame at
+      // this block.
+      shadows[i]->Bind();
+      frame_->SpillAll();
+
+      // Reload sp from the top handler, because some statements that
+      // we break from (eg, for...in) may have left stuff on the
+      // stack.
+      __ mov(edx, Operand::StaticVariable(handler_address));
+      const int kNextOffset = StackHandlerConstants::kNextOffset +
+          StackHandlerConstants::kAddressDisplacement;
+      __ lea(esp, Operand(edx, kNextOffset));
+      frame_->Forget(frame_->height() - handler_height);
+
+      // Unlink this handler and drop it from the frame.
+      frame_->EmitPop(Operand::StaticVariable(handler_address));
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+      if (i == kReturnShadowIndex) {
+        // If this target shadowed the function return, materialize
+        // the return value on the stack.
+        frame_->EmitPush(eax);
       } else {
-        // Fake TOS for labels that shadowed breaks and continues.
-        frame_->Push(Immediate(Factory::undefined_value()));
+        // Fake TOS for targets that shadowed breaks and continues.
+        frame_->EmitPush(Immediate(Factory::undefined_value()));
       }
       __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
-      __ jmp(&unlink);
+      if (--nof_unlinks > 0) {
+        // If this is not the last unlink block, jump around the next.
+        finally_block.Jump();
+      }
     }
   }
 
-  // Unlink from try chain; be careful not to destroy the TOS.
-  __ bind(&unlink);
-  // Reload sp from the top handler, because some statements that we
-  // break from (eg, for...in) may have left stuff on the stack.
-  // Preserve the TOS in a register across stack manipulation.
-  frame_->Pop(eax);
-  ExternalReference handler_address(Top::k_handler_address);
-  __ mov(edx, Operand::StaticVariable(handler_address));
-  const int kNextOffset = StackHandlerConstants::kNextOffset +
-      StackHandlerConstants::kAddressDisplacement;
-  __ lea(esp, Operand(edx, kNextOffset));
-
-  frame_->Pop(Operand::StaticVariable(handler_address));
-  frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-  // Next_sp popped.
-  frame_->Push(eax);
-
   // --- Finally block ---
-  __ bind(&finally_block);
+  finally_block.Bind();
 
   // Push the state on the stack.
-  frame_->Push(ecx);
+  frame_->EmitPush(ecx);
 
   // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block. Record it, so
-  // that a break/continue crossing this statement can restore the
-  // stack.
-  const int kFinallyStackSize = 2 * kPointerSize;
-  break_stack_height_ += kFinallyStackSize;
-
+  // and the state - while evaluating the finally block.
+  //
   // Generate code for the statements in the finally block.
-  VisitStatements(node->finally_block()->statements());
+  VisitStatementsAndSpill(node->finally_block()->statements());
 
-  // Restore state and return value or faked TOS.
-  frame_->Pop(ecx);
-  frame_->Pop(eax);
-  break_stack_height_ -= kFinallyStackSize;
-
-  // Generate code to jump to the right destination for all used (formerly)
-  // shadowing labels.
-  for (int i = 0; i <= nof_escapes; i++) {
-    if (shadows[i]->is_bound()) {
-      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
-      __ j(equal, shadows[i]->original_label());
-    }
+  if (has_valid_frame()) {
+    // Restore state and return value or faked TOS.
+    frame_->EmitPop(ecx);
+    frame_->EmitPop(eax);
   }
 
-  // Check if we need to rethrow the exception.
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
-  __ j(not_equal, &exit);
+  // Generate code to jump to the right destination for all used
+  // formerly shadowing targets.  Deallocate each shadow target.
+  for (int i = 0; i < shadows.length(); i++) {
+    if (has_valid_frame() && shadows[i]->is_bound()) {
+      JumpTarget* original = shadows[i]->other_target();
+      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
+      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+        JumpTarget skip(this);
+        skip.Branch(not_equal);
+        frame_->PrepareForReturn();
+        original->Jump();
+        skip.Bind();
+      } else {
+        original->Branch(equal);
+      }
+    }
+    delete shadows[i];
+  }
 
-  // Rethrow exception.
-  frame_->Push(eax);  // undo pop from above
-  __ CallRuntime(Runtime::kReThrow, 1);
+  if (has_valid_frame()) {
+    // Check if we need to rethrow the exception.
+    JumpTarget exit(this);
+    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
+    exit.Branch(not_equal);
 
-  // Done.
-  __ bind(&exit);
+    // Rethrow exception.
+    frame_->EmitPush(eax);  // undo pop from above
+    frame_->CallRuntime(Runtime::kReThrow, 1);
+
+    // Done.
+    exit.Bind();
+  }
 }
 
 
 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+  ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatement(node);
-  __ CallRuntime(Runtime::kDebugBreak, 0);
+  CodeForStatementPosition(node);
+  // Spill everything, even constants, to the frame.
+  frame_->SpillAll();
+  frame_->CallRuntime(Runtime::kDebugBreak, 0);
   // Ignore the return value.
 }
 
@@ -2307,12 +2976,12 @@
   ASSERT(boilerplate->IsBoilerplate());
 
   // Push the boilerplate on the stack.
-  frame_->Push(Immediate(boilerplate));
+  frame_->Push(boilerplate);
 
   // Create a new closure.
   frame_->Push(esi);
-  __ CallRuntime(Runtime::kNewClosure, 2);
-  frame_->Push(eax);
+  Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+  frame_->Push(&result);
 }
 
 
@@ -2336,15 +3005,33 @@
 
 void CodeGenerator::VisitConditional(Conditional* node) {
   Comment cmnt(masm_, "[ Conditional");
-  Label then, else_, exit;
-  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
-  Branch(false, &else_);
-  __ bind(&then);
-  Load(node->then_expression(), typeof_state());
-  __ jmp(&exit);
-  __ bind(&else_);
-  Load(node->else_expression(), typeof_state());
-  __ bind(&exit);
+  JumpTarget then(this);
+  JumpTarget else_(this);
+  JumpTarget exit(this);
+  ControlDestination dest(&then, &else_, true);
+  LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+
+  if (dest.false_was_fall_through()) {
+    // The else target was bound, so we compile the else part first.
+    Load(node->else_expression(), typeof_state());
+
+    if (then.is_linked()) {
+      exit.Jump();
+      then.Bind();
+      Load(node->then_expression(), typeof_state());
+    }
+  } else {
+    // The then target was bound, so we compile the then part first.
+    Load(node->then_expression(), typeof_state());
+
+    if (else_.is_linked()) {
+      exit.Jump();
+      else_.Bind();
+      Load(node->else_expression(), typeof_state());
+    }
+  }
+
+  exit.Bind();
 }
 
 
@@ -2352,7 +3039,9 @@
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
 
-    Label slow, done;
+    JumpTarget slow(this);
+    JumpTarget done(this);
+    Result value(this);
 
     // Generate fast-case code for variables that might be shadowed by
     // eval-introduced variables.  Eval is used a lot without
@@ -2360,75 +3049,108 @@
     // perform a runtime call for all variables in the scope
     // containing the eval.
     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, ebx, &slow);
-      __ jmp(&done);
+      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      // If there was no control flow to slow, we can exit early.
+      if (!slow.is_linked()) {
+        frame_->Push(&value);
+        return;
+      }
+
+      done.Jump(&value);
 
     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
       // Only generate the fast case for locals that rewrite to slots.
       // This rules out argument loads.
       if (potential_slot != NULL) {
-        __ mov(eax,
+        // Allocate a fresh register to use as a temp in
+        // ContextSlotOperandCheckExtensions and to hold the result
+        // value.
+        value = allocator_->Allocate();
+        ASSERT(value.is_valid());
+        __ mov(value.reg(),
                ContextSlotOperandCheckExtensions(potential_slot,
-                                                 ebx,
+                                                 value,
                                                  &slow));
-        __ jmp(&done);
+        // There is always control flow to slow from
+        // ContextSlotOperandCheckExtensions.
+        done.Jump(&value);
       }
     }
 
-    __ bind(&slow);
+    slow.Bind();
     frame_->Push(esi);
-    frame_->Push(Immediate(slot->var()->name()));
+    frame_->Push(slot->var()->name());
     if (typeof_state == INSIDE_TYPEOF) {
-      __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+      value =
+          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      __ CallRuntime(Runtime::kLoadContextSlot, 2);
+      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    __ bind(&done);
-    frame_->Push(eax);
+    done.Bind(&value);
+    frame_->Push(&value);
+
+  } else if (slot->var()->mode() == Variable::CONST) {
+    // Const slots may contain 'the hole' value (the constant hasn't been
+    // initialized yet) which needs to be converted into the 'undefined'
+    // value.
+    //
+    // We currently spill the virtual frame because constants use the
+    // potentially unsafe direct-frame access of SlotOperand.
+    VirtualFrame::SpilledScope spilled_scope(this);
+    Comment cmnt(masm_, "[ Load const");
+    JumpTarget exit(this);
+    __ mov(ecx, SlotOperand(slot, ecx));
+    __ cmp(ecx, Factory::the_hole_value());
+    exit.Branch(not_equal);
+    __ mov(ecx, Factory::undefined_value());
+    exit.Bind();
+    frame_->EmitPush(ecx);
+
+  } else if (slot->type() == Slot::PARAMETER) {
+    frame_->PushParameterAt(slot->index());
+
+  } else if (slot->type() == Slot::LOCAL) {
+    frame_->PushLocalAt(slot->index());
 
   } else {
-    // Note: We would like to keep the assert below, but it fires because of
-    // some nasty code in LoadTypeofExpression() which should be removed...
-    // ASSERT(!slot->var()->is_dynamic());
-    if (slot->var()->mode() == Variable::CONST) {
-      // Const slots may contain 'the hole' value (the constant hasn't been
-      // initialized yet) which needs to be converted into the 'undefined'
-      // value.
-      Comment cmnt(masm_, "[ Load const");
-      Label exit;
-      __ mov(eax, SlotOperand(slot, ecx));
-      __ cmp(eax, Factory::the_hole_value());
-      __ j(not_equal, &exit);
-      __ mov(eax, Factory::undefined_value());
-      __ bind(&exit);
-      frame_->Push(eax);
-    } else {
-      frame_->Push(SlotOperand(slot, ecx));
-    }
+    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
+    // here.
+    //
+    // The use of SlotOperand below is safe for an unspilled frame
+    // because it will always be a context slot.
+    ASSERT(slot->type() == Slot::CONTEXT);
+    Result temp = allocator_->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame_->Push(&temp);
   }
 }
 
 
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      Register tmp,
-                                                      Label* slow) {
+Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    JumpTarget* slow) {
   // Check that no extension objects have been created by calls to
   // eval from the current scope to the global scope.
-  Register context = esi;
+  Result context(esi, this);
+  Result tmp = allocator_->Allocate();
+  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
+
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
         // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-        __ j(not_equal, slow, not_taken);
+        __ cmp(ContextOperand(context.reg(), Context::EXTENSION_INDEX),
+               Immediate(0));
+        slow->Branch(not_equal, not_taken);
       }
       // Load next context in chain.
-      __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+      __ mov(tmp.reg(), ContextOperand(context.reg(), Context::CLOSURE_INDEX));
+      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
       context = tmp;
     }
     // If no outer scope calls eval, we do not need to check more
@@ -2439,39 +3161,138 @@
   }
 
   if (s->is_eval_scope()) {
+    // Loop up the context chain.  There is no frame effect so it is
+    // safe to use raw labels here.
     Label next, fast;
-    if (!context.is(tmp)) __ mov(tmp, Operand(context));
+    if (!context.reg().is(tmp.reg())) __ mov(tmp.reg(), context.reg());
     __ bind(&next);
     // Terminate at global context.
-    __ cmp(FieldOperand(tmp, HeapObject::kMapOffset),
+    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
            Immediate(Factory::global_context_map()));
     __ j(equal, &fast);
     // Check that extension is NULL.
-    __ cmp(ContextOperand(tmp, Context::EXTENSION_INDEX), Immediate(0));
-    __ j(not_equal, slow, not_taken);
+    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+    slow->Branch(not_equal, not_taken);
     // Load next context in chain.
-    __ mov(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
-    __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
     __ jmp(&next);
     __ bind(&fast);
   }
+  context.Unuse();
+  tmp.Unuse();
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   // Load the global object.
   LoadGlobal();
-  // Setup the name register.
-  __ mov(ecx, slot->var()->name());
-  // Call IC stub.
-  if (typeof_state == INSIDE_TYPEOF) {
-    __ call(ic, RelocInfo::CODE_TARGET);
-  } else {
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  }
+  // Setup the name register.  All non-reserved registers are available.
+  Result name = allocator_->Allocate(ecx);
+  ASSERT(name.is_valid());
+  __ mov(name.reg(), slot->var()->name());
+  RelocInfo::Mode rmode = (typeof_state == INSIDE_TYPEOF)
+                        ? RelocInfo::CODE_TARGET
+                        : RelocInfo::CODE_TARGET_CONTEXT;
+  Result answer = frame_->CallCodeObject(ic, rmode, &name, 0);
 
-  // Pop the global object. The result is in eax.
-  frame_->Pop();
+  // Discard the global object. The result is in answer.
+  frame_->Drop();
+  return answer;
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  if (slot->type() == Slot::LOOKUP) {
+    ASSERT(slot->var()->is_dynamic());
+
+    // For now, just do a runtime call.
+    frame_->Push(esi);
+    frame_->Push(slot->var()->name());
+
+    Result value(this);
+    if (init_state == CONST_INIT) {
+      // Same as the case for a normal store, but ignores attribute
+      // (e.g. READ_ONLY) of context slot so that we can initialize const
+      // properties (introduced via eval("const foo = (some expr);")). Also,
+      // uses the current function context instead of the top context.
+      //
+      // Note that we must declare the foo upon entry of eval(), via a
+      // context slot declaration, but we cannot initialize it at the same
+      // time, because the const declaration may be at the end of the eval
+      // code (sigh...) and the const variable may have been used before
+      // (where its value is 'undefined'). Thus, we can only do the
+      // initialization when we actually encounter the expression and when
+      // the expression operands are defined and valid, and thus we need the
+      // split into 2 operations: declaration of the context slot followed
+      // by initialization.
+      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+    } else {
+      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+    }
+    // Storing a variable must keep the (new) value on the expression
+    // stack. This is necessary for compiling chained assignment
+    // expressions.
+    frame_->Push(&value);
+
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit(this);
+    if (init_state == CONST_INIT) {
+      ASSERT(slot->var()->mode() == Variable::CONST);
+      // Only the first const initialization must be executed (the slot
+      // still contains 'the hole' value). When the assignment is executed,
+      // the code is identical to a normal store (see below).
+      //
+      // We spill the frame in the code below because the direct-frame
+      // access of SlotOperand is potentially unsafe with an unspilled
+      // frame.
+      VirtualFrame::SpilledScope spilled_scope(this);
+      Comment cmnt(masm_, "[ Init const");
+      __ mov(ecx, SlotOperand(slot, ecx));
+      __ cmp(ecx, Factory::the_hole_value());
+      exit.Branch(not_equal);
+    }
+
+    // We must execute the store.  Storing a variable must keep the (new)
+    // value on the stack. This is necessary for compiling assignment
+    // expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will initialize
+    // consts to 'the hole' value and by doing so, end up calling this code.
+    if (slot->type() == Slot::PARAMETER) {
+      frame_->StoreToParameterAt(slot->index());
+    } else if (slot->type() == Slot::LOCAL) {
+      frame_->StoreToLocalAt(slot->index());
+    } else {
+      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
+      //
+      // The use of SlotOperand below is safe for an unspilled frame
+      // because the slot is a context slot.
+      ASSERT(slot->type() == Slot::CONTEXT);
+      frame_->Dup();
+      Result value = frame_->Pop();
+      value.ToRegister();
+      Result start = allocator_->Allocate();
+      ASSERT(start.is_valid());
+      __ mov(SlotOperand(slot, start.reg()), value.reg());
+      // RecordWrite may destroy the value registers.
+      //
+      // TODO(204): Avoid actually spilling when the value is not
+      // needed (probably the common case).
+      frame_->Spill(value.reg());
+      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
+      // The results start, value, and temp are unused by going out of
+      // scope.
+    }
+
+    exit.Bind();
+  }
 }
 
 
@@ -2497,73 +3318,95 @@
 
 void CodeGenerator::VisitLiteral(Literal* node) {
   Comment cmnt(masm_, "[ Literal");
-  if (node->handle()->IsSmi() && !IsInlineSmi(node)) {
-    // To prevent long attacker-controlled byte sequences in code, larger
-    // Smis are loaded in two steps.
-    int bits = reinterpret_cast<int>(*node->handle());
-    __ mov(eax, bits & 0x0000FFFF);
-    __ xor_(eax, bits & 0xFFFF0000);
-    frame_->Push(eax);
-  } else {
-    frame_->Push(Immediate(node->handle()));
+    frame_->Push(node->handle());
   }
+
+
+void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+  ASSERT(target.is_valid());
+  ASSERT(value->IsSmi());
+  int bits = reinterpret_cast<int>(*value);
+  __ Set(target, Immediate(bits & 0x0000FFFF));
+  __ xor_(target, bits & 0xFFFF0000);
 }
 
 
-class RegExpDeferred: public DeferredCode {
+bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
+  if (!value->IsSmi()) return false;
+  int int_value = Smi::cast(*value)->value();
+  return !is_intn(int_value, kMaxSmiInlinedBits);
+}
+
+
+class DeferredRegExpLiteral: public DeferredCode {
  public:
-  RegExpDeferred(CodeGenerator* generator, RegExpLiteral* node)
+  DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node)
       : DeferredCode(generator), node_(node) {
-    set_comment("[ RegExpDeferred");
+    set_comment("[ DeferredRegExpLiteral");
   }
+
   virtual void Generate();
+
  private:
   RegExpLiteral* node_;
 };
 
 
-void RegExpDeferred::Generate() {
-  // If the entry is undefined we call the runtime system to computed
-  // the literal.
+void DeferredRegExpLiteral::Generate() {
+  Result literals(generator());
+  enter()->Bind(&literals);
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
 
+  VirtualFrame* frame = generator()->frame();
   // Literal array (0).
-  __ push(ecx);
+  frame->Push(&literals);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  frame->Push(Smi::FromInt(node_->literal_index()));
   // RegExp pattern (2).
-  __ push(Immediate(node_->pattern()));
+  frame->Push(node_->pattern());
   // RegExp flags (3).
-  __ push(Immediate(node_->flags()));
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  __ mov(ebx, Operand(eax));  // "caller" expects result in ebx
+  frame->Push(node_->flags());
+  Result boilerplate =
+      frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  exit_.Jump(&boilerplate);
 }
 
 
 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
   Comment cmnt(masm_, "[ RegExp Literal");
-  RegExpDeferred* deferred = new RegExpDeferred(this, node);
+  DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);
 
-  // Retrieve the literal array and check the allocated entry.
-
-  // Load the function of this activation.
-  __ mov(ecx, frame_->Function());
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
 
   // Load the literals array of the function.
-  __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
   int literal_offset =
       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ mov(ebx, FieldOperand(ecx, literal_offset));
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
 
-  // Check whether we need to materialize the RegExp object.
-  // If so, jump to the deferred code.
-  __ cmp(ebx, Factory::undefined_value());
-  __ j(equal, deferred->enter(), not_taken);
-  __ bind(deferred->exit());
+  // Check whether we need to materialize the RegExp object.  If so,
+  // jump to the deferred code passing the literals array.
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->enter()->Branch(equal, &literals, not_taken);
 
-  // Push the literal.
-  frame_->Push(ebx);
+  literals.Unuse();
+  // The deferred code returns the boilerplate object.
+  deferred->BindExit(&boilerplate);
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
 }
 
 
@@ -2571,116 +3414,137 @@
 // by calling Runtime_CreateObjectLiteral.
 // Each created boilerplate is stored in the JSFunction and they are
 // therefore context dependent.
-class ObjectLiteralDeferred: public DeferredCode {
+class DeferredObjectLiteral: public DeferredCode {
  public:
-  ObjectLiteralDeferred(CodeGenerator* generator,
+  DeferredObjectLiteral(CodeGenerator* generator,
                         ObjectLiteral* node)
       : DeferredCode(generator), node_(node) {
-    set_comment("[ ObjectLiteralDeferred");
+    set_comment("[ DeferredObjectLiteral");
   }
+
   virtual void Generate();
+
  private:
   ObjectLiteral* node_;
 };
 
 
-void ObjectLiteralDeferred::Generate() {
-  // If the entry is undefined we call the runtime system to compute
-  // the literal.
+void DeferredObjectLiteral::Generate() {
+  Result literals(generator());
+  enter()->Bind(&literals);
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
 
+  VirtualFrame* frame = generator()->frame();
   // Literal array (0).
-  __ push(ecx);
+  frame->Push(&literals);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  frame->Push(Smi::FromInt(node_->literal_index()));
   // Constant properties (2).
-  __ push(Immediate(node_->constant_properties()));
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ mov(ebx, Operand(eax));
+  frame->Push(node_->constant_properties());
+  Result boilerplate =
+      frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  exit_.Jump(&boilerplate);
 }
 
 
 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
+  DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
 
-  // Retrieve the literal array and check the allocated entry.
-
-  // Load the function of this activation.
-  __ mov(ecx, frame_->Function());
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
 
   // Load the literals array of the function.
-  __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
   int literal_offset =
       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ mov(ebx, FieldOperand(ecx, literal_offset));
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
 
   // Check whether we need to materialize the object literal boilerplate.
-  // If so, jump to the deferred code.
-  __ cmp(ebx, Factory::undefined_value());
-  __ j(equal, deferred->enter(), not_taken);
-  __ bind(deferred->exit());
+  // If so, jump to the deferred code passing the literals array.
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->enter()->Branch(equal, &literals, not_taken);
 
-  // Push the literal.
-  frame_->Push(ebx);
+  literals.Unuse();
+  // The deferred code returns the boilerplate object.
+  deferred->BindExit(&boilerplate);
+
+  // Push the boilerplate object.
+  frame_->Push(&boilerplate);
   // Clone the boilerplate object.
-  __ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
-  // Push the new cloned literal object as the result.
-  frame_->Push(eax);
-
+  Result clone =
+      frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
 
   for (int i = 0; i < node->properties()->length(); i++) {
     ObjectLiteral::Property* property = node->properties()->at(i);
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT: break;
+      case ObjectLiteral::Property::CONSTANT:
+        break;
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+        // else fall through.
       case ObjectLiteral::Property::COMPUTED: {
         Handle<Object> key(property->key()->handle());
         Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
         if (key->IsSymbol()) {
-          __ mov(eax, frame_->Top());
-          frame_->Push(eax);
+          // Duplicate the object as the IC receiver.
+          frame_->Dup();
           Load(property->value());
-          frame_->Pop(eax);
-          __ Set(ecx, Immediate(key));
-          __ call(ic, RelocInfo::CODE_TARGET);
-          frame_->Pop();
-          // Ignore result.
+          Result value = frame_->Pop();
+          value.ToRegister(eax);
+
+          Result name = allocator_->Allocate(ecx);
+          ASSERT(name.is_valid());
+          __ Set(name.reg(), Immediate(key));
+          Result ignored =
+              frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+                                     &value, &name, 0);
+          // Drop the duplicated receiver and ignore the result.
+          frame_->Drop();
           break;
         }
         // Fall through
       }
       case ObjectLiteral::Property::PROTOTYPE: {
-        __ mov(eax, frame_->Top());
-        frame_->Push(eax);
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
         Load(property->key());
         Load(property->value());
-        __ CallRuntime(Runtime::kSetProperty, 3);
-        // Ignore result.
+        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+        // Ignore the result.
         break;
       }
       case ObjectLiteral::Property::SETTER: {
-        // Duplicate the resulting object on the stack. The runtime
-        // function will pop the three arguments passed in.
-        __ mov(eax, frame_->Top());
-        frame_->Push(eax);
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
         Load(property->key());
-        frame_->Push(Immediate(Smi::FromInt(1)));
+        frame_->Push(Smi::FromInt(1));
         Load(property->value());
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore result.
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
         break;
       }
       case ObjectLiteral::Property::GETTER: {
-        // Duplicate the resulting object on the stack. The runtime
-        // function will pop the three arguments passed in.
-        __ mov(eax, frame_->Top());
-        frame_->Push(eax);
+        // Duplicate the object as an argument to the runtime call.
+        frame_->Dup();
         Load(property->key());
-        frame_->Push(Immediate(Smi::FromInt(0)));
+        frame_->Push(Smi::FromInt(0));
         Load(property->value());
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore result.
+        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+        // Ignore the result.
         break;
       }
       default: UNREACHABLE();
@@ -2689,58 +3553,137 @@
 }
 
 
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateArrayLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and they are
+// therefore context dependent.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+  DeferredArrayLiteral(CodeGenerator* generator,
+                       ArrayLiteral* node)
+      : DeferredCode(generator), node_(node) {
+    set_comment("[ DeferredArrayLiteral");
+  }
+
+  virtual void Generate();
+
+ private:
+  ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+  Result literals(generator());
+  enter()->Bind(&literals);
+  // Since the entry is undefined we call the runtime system to
+  // compute the literal.
+
+  VirtualFrame* frame = generator()->frame();
+  // Literal array (0).
+  frame->Push(&literals);
+  // Literal index (1).
+  frame->Push(Smi::FromInt(node_->literal_index()));
+  // Constant elements (2).
+  frame->Push(node_->literals());
+  Result boilerplate =
+      frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+  exit_.Jump(&boilerplate);
+}
+
+
 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+  DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
 
-  // Call runtime to create the array literal.
-  frame_->Push(Immediate(node->literals()));
-  // Load the function of this frame.
-  __ mov(ecx, frame_->Function());
+  // Retrieve the literals array and check the allocated entry.  Begin
+  // with a writable copy of the function of this activation in a
+  // register.
+  frame_->PushFunction();
+  Result literals = frame_->Pop();
+  literals.ToRegister();
+  frame_->Spill(literals.reg());
+
   // Load the literals array of the function.
-  __ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
-  frame_->Push(ecx);
-  __ CallRuntime(Runtime::kCreateArrayLiteral, 2);
+  __ mov(literals.reg(),
+         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
+
+  // Load the literal at the ast saved index.
+  int literal_offset =
+      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+  Result boilerplate = allocator_->Allocate();
+  ASSERT(boilerplate.is_valid());
+  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code passing the literals array.
+  __ cmp(boilerplate.reg(), Factory::undefined_value());
+  deferred->enter()->Branch(equal, &literals, not_taken);
+
+  literals.Unuse();
+  // The deferred code returns the boilerplate object.
+  deferred->BindExit(&boilerplate);
 
   // Push the resulting array literal on the stack.
-  frame_->Push(eax);
+  frame_->Push(&boilerplate);
+
+  // Clone the boilerplate object.
+  Result clone =
+      frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  // Push the newly cloned literal object as the result.
+  frame_->Push(&clone);
 
   // Generate code to set the elements in the array that are not
   // literals.
   for (int i = 0; i < node->values()->length(); i++) {
     Expression* value = node->values()->at(i);
 
-    // If value is literal the property value is already
-    // set in the boilerplate object.
-    if (value->AsLiteral() == NULL) {
-      // The property must be set by generated code.
-      Load(value);
+    // If value is a literal the property value is already set in the
+    // boilerplate object.
+    if (value->AsLiteral() != NULL) continue;
+    // If value is a materialized literal the property value is already set
+    // in the boilerplate object if it is simple.
+    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
 
-      // Get the value off the stack.
-      frame_->Pop(eax);
-      // Fetch the object literal while leaving on the stack.
-      __ mov(ecx, frame_->Top());
-      // Get the elements array.
-      __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+    // The property must be set by generated code.
+    Load(value);
 
-      // Write to the indexed properties array.
-      int offset = i * kPointerSize + Array::kHeaderSize;
-      __ mov(FieldOperand(ecx, offset), eax);
+    // Get the property value off the stack.
+    Result prop_value = frame_->Pop();
+    prop_value.ToRegister();
 
-      // Update the write barrier for the array address.
-      __ RecordWrite(ecx, offset, eax, ebx);
-    }
+    // Fetch the array literal while leaving a copy on the stack and
+    // use it to get the elements array.
+    frame_->Dup();
+    Result elements = frame_->Pop();
+    elements.ToRegister();
+    frame_->Spill(elements.reg());
+    // Get the elements array.
+    __ mov(elements.reg(),
+           FieldOperand(elements.reg(), JSObject::kElementsOffset));
+
+    // Write to the indexed properties array.
+    int offset = i * kPointerSize + Array::kHeaderSize;
+    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
+
+    // Update the write barrier for the array address.
+    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_valid());
+    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
   }
 }
 
 
 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  ASSERT(!in_spilled_code());
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[CatchExtensionObject ");
+  Comment cmnt(masm_, "[ CatchExtensionObject");
   Load(node->key());
   Load(node->value());
-  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(eax);
+  Result result =
+      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  frame_->Push(&result);
 }
 
 
@@ -2753,58 +3696,80 @@
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
   Comment cmnt(masm_, "[ Assignment");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  Reference target(this, node->target());
-  if (target.is_illegal()) return;
-
-  if (node->starts_initialization_block()) {
-    ASSERT(target.type() == Reference::NAMED ||
-           target.type() == Reference::KEYED);
-    // Change to slow case in the beginning of an initialization block
-    // to avoid the quadratic behavior of repeatedly adding fast properties.
-    int stack_position = (target.type() == Reference::NAMED) ? 0 : 1;
-    frame_->Push(Operand(esp, stack_position * kPointerSize));
-    __ CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-  if (node->op() == Token::ASSIGN ||
-      node->op() == Token::INIT_VAR ||
-      node->op() == Token::INIT_CONST) {
-    Load(node->value());
-
-  } else {
-    target.GetValue(NOT_INSIDE_TYPEOF);
-    Literal* literal = node->value()->AsLiteral();
-    if (IsInlineSmi(literal)) {
-      SmiOperation(node->binary_op(), node->type(), literal->handle(), false,
-                   NO_OVERWRITE);
-    } else {
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(), node->type());
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->Push(Smi::FromInt(0));
+      return;
     }
-  }
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
 
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  if (var != NULL &&
-      var->mode() == Variable::CONST &&
-      node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-    // Assignment ignored - leave the value on the stack.
-  } else {
-    CodeForSourcePosition(node->position());
-    if (node->op() == Token::INIT_CONST) {
-      // Dynamic constant initializations must use the function context
-      // and initialize the actual constant declared. Dynamic variable
-      // initializations are simply assignments and use SetValue.
-      target.SetValue(CONST_INIT);
+    if (node->starts_initialization_block()) {
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      // Change to slow case in the beginning of an initialization
+      // block to avoid the quadratic behavior of repeatedly adding
+      // fast properties.
+
+      // The receiver is the argument to the runtime call.  It is the
+      // first value pushed when the reference was loaded to the
+      // frame.
+      frame_->PushElementAt(target.size() - 1);
+      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+    }
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      Load(node->value());
+
     } else {
-      target.SetValue(NOT_CONST_INIT);
+      Literal* literal = node->value()->AsLiteral();
+      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+      // There are two cases where the target is not read in the right hand
+      // side, that are easy to test for: the right hand side is a literal,
+      // or the right hand side is a different variable.  TakeValue invalidates
+      // the target, with an implicit promise that it will be written to again
+      // before it is read.
+      if (literal != NULL || (right_var != NULL && right_var != var)) {
+        target.TakeValue(NOT_INSIDE_TYPEOF);
+      } else {
+        target.GetValue(NOT_INSIDE_TYPEOF);
+      }
+      if (IsInlineSmi(literal)) {
+        SmiOperation(node->binary_op(), node->type(), literal->handle(), false,
+                     NO_OVERWRITE);
+      } else {
+        Load(node->value());
+        GenericBinaryOperation(node->binary_op(), node->type());
+      }
+    }
+
+    if (var != NULL &&
+        var->mode() == Variable::CONST &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
       if (node->ends_initialization_block()) {
         ASSERT(target.type() == Reference::NAMED ||
                target.type() == Reference::KEYED);
-        // End of initialization block. Revert to fast case.
-        int stack_position = (target.type() == Reference::NAMED) ? 1 : 2;
-        frame_->Push(Operand(esp, stack_position * kPointerSize));
-        __ CallRuntime(Runtime::kToFastProperties, 1);
+        // End of initialization block. Revert to fast case.  The
+        // argument to the runtime call is the receiver, which is the
+        // first value pushed as part of the reference, which is below
+        // the lhs value.
+        frame_->PushElementAt(target.size());
+        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
       }
     }
   }
@@ -2813,17 +3778,16 @@
 
 void CodeGenerator::VisitThrow(Throw* node) {
   Comment cmnt(masm_, "[ Throw");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   Load(node->exception());
-  __ CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(eax);
+  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
+  frame_->Push(&result);
 }
 
 
 void CodeGenerator::VisitProperty(Property* node) {
   Comment cmnt(masm_, "[ Property");
-
   Reference property(this, node);
   property.GetValue(typeof_state());
 }
@@ -2834,7 +3798,7 @@
 
   ZoneList<Expression*>* args = node->arguments();
 
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // Check if the function is a variable or a property.
   Expression* function = node->expression();
@@ -2856,27 +3820,31 @@
     // ----------------------------------
 
     // Push the name of the function and the receiver onto the stack.
-    frame_->Push(Immediate(var->name()));
+    frame_->Push(var->name());
 
     // Pass the global object as the receiver and let the IC stub
     // patch the stack to use the global proxy as 'this' in the
     // invoked function.
     LoadGlobal();
+
     // Load the arguments.
-    for (int i = 0; i < args->length(); i++) {
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
     }
 
     // Setup the receiver register and call the IC initialization code.
     Handle<Code> stub = (loop_nesting() > 0)
-        ? ComputeCallInitializeInLoop(args->length())
-        : ComputeCallInitialize(args->length());
+        ? ComputeCallInitializeInLoop(arg_count)
+        : ComputeCallInitialize(arg_count);
     CodeForSourcePosition(node->position());
-    __ call(stub, RelocInfo::CODE_TARGET_CONTEXT);
-    __ mov(esi, frame_->Context());
+    Result result = frame_->CallCodeObject(stub,
+                                           RelocInfo::CODE_TARGET_CONTEXT,
+                                           arg_count + 1);
+    frame_->RestoreContextRegister();
 
-    // Overwrite the function on the stack with the result.
-    __ mov(frame_->Top(), eax);
+    // Replace the function on the stack with the result.
+    frame_->SetElementAt(0, &result);
 
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
@@ -2886,8 +3854,8 @@
 
     // Load the function
     frame_->Push(esi);
-    frame_->Push(Immediate(var->name()));
-    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    frame_->Push(var->name());
+    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
     // eax: slot value; edx: receiver
 
     // Load the receiver.
@@ -2907,22 +3875,27 @@
       // ------------------------------------------------------------------
 
       // Push the name of the function and the receiver onto the stack.
-      frame_->Push(Immediate(literal->handle()));
+      frame_->Push(literal->handle());
       Load(property->obj());
 
       // Load the arguments.
-      for (int i = 0; i < args->length(); i++) Load(args->at(i));
+      int arg_count = args->length();
+      for (int i = 0; i < arg_count; i++) {
+        Load(args->at(i));
+      }
 
       // Call the IC initialization code.
       Handle<Code> stub = (loop_nesting() > 0)
-        ? ComputeCallInitializeInLoop(args->length())
-        : ComputeCallInitialize(args->length());
+        ? ComputeCallInitializeInLoop(arg_count)
+        : ComputeCallInitialize(arg_count);
       CodeForSourcePosition(node->position());
-      __ call(stub, RelocInfo::CODE_TARGET);
-      __ mov(esi, frame_->Context());
+      Result result = frame_->CallCodeObject(stub,
+                                             RelocInfo::CODE_TARGET,
+                                             arg_count + 1);
+      frame_->RestoreContextRegister();
 
-      // Overwrite the function on the stack with the result.
-      __ mov(frame_->Top(), eax);
+      // Replace the function on the stack with the result.
+      frame_->SetElementAt(0, &result);
 
     } else {
       // -------------------------------------------
@@ -2934,8 +3907,13 @@
       ref.GetValue(NOT_INSIDE_TYPEOF);
 
       // Pass receiver to called function.
-      // The reference's size is non-negative.
-      frame_->Push(frame_->Element(ref.size()));
+      if (property->is_synthetic()) {
+        // Use global object as receiver.
+        LoadGlobalReceiver();
+      } else {
+        // The reference's size is non-negative.
+        frame_->PushElementAt(ref.size());
+      }
 
       // Call the function.
       CallWithArguments(args, node->position());
@@ -2950,7 +3928,7 @@
     Load(function);
 
     // Pass the global proxy as the receiver.
-    LoadGlobalReceiver(eax);
+    LoadGlobalReceiver();
 
     // Call the function.
     CallWithArguments(args, node->position());
@@ -2960,7 +3938,7 @@
 
 void CodeGenerator::VisitCallNew(CallNew* node) {
   Comment cmnt(masm_, "[ CallNew");
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -2976,24 +3954,37 @@
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = node->arguments();
-  for (int i = 0; i < args->length(); i++) Load(args->at(i));
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Load(args->at(i));
+  }
 
   // Constructors are called with the number of arguments in register
   // eax for now. Another option would be to have separate construct
   // call trampolines per different arguments counts encountered.
-  __ Set(eax, Immediate(args->length()));
+  Result num_args = allocator()->Allocate(eax);
+  ASSERT(num_args.is_valid());
+  __ Set(num_args.reg(), Immediate(arg_count));
 
   // Load the function into temporary function slot as per calling
   // convention.
-  __ mov(edi, frame_->Element(args->length() + 1));
+  frame_->PushElementAt(arg_count + 1);
+  Result function = frame_->Pop();
+  function.ToRegister(edi);
+  ASSERT(function.is_valid());
 
   // Call the construct call builtin that handles allocation and
   // constructor invocation.
   CodeForSourcePosition(node->position());
-  __ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
-          RelocInfo::CONSTRUCT_CALL);
-  // Discard the function and "push" the newly created object.
-  __ mov(frame_->Top(), eax);
+  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+  Result result = frame_->CallCodeObject(ic,
+                                         RelocInfo::CONSTRUCT_CALL,
+                                         &num_args,
+                                         &function,
+                                         arg_count + 1);
+
+  // Replace the function on the stack with the result.
+  frame_->SetElementAt(0, &result);
 }
 
 
@@ -3007,50 +3998,64 @@
   ZoneList<Expression*>* args = node->arguments();
   Expression* function = node->expression();
 
-  CodeForStatement(node);
+  CodeForStatementPosition(node);
 
-  // Prepare stack for call to resolved function.
+  // Prepare the stack for the call to the resolved function.
   Load(function);
-  __ push(Immediate(Factory::undefined_value()));  // Slot for receiver
-  for (int i = 0; i < args->length(); i++) {
+
+  // Allocate a frame slot for the receiver.
+  frame_->Push(Factory::undefined_value());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
   }
 
-  // Prepare stack for call to ResolvePossiblyDirectEval.
-  __ push(Operand(esp, args->length() * kPointerSize + kPointerSize));
-  if (args->length() > 0) {
-    __ push(Operand(esp, args->length() * kPointerSize));
+  // Prepare the stack for the call to ResolvePossiblyDirectEval.
+  frame_->PushElementAt(arg_count + 1);
+  if (arg_count > 0) {
+    frame_->PushElementAt(arg_count);
   } else {
-    __ push(Immediate(Factory::undefined_value()));
+    frame_->Push(Factory::undefined_value());
   }
 
   // Resolve the call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+  Result result =
+      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
 
-  // Touch up stack with the right values for the function and the receiver.
-  __ mov(edx, FieldOperand(eax, FixedArray::kHeaderSize));
-  __ mov(Operand(esp, (args->length() + 1) * kPointerSize), edx);
-  __ mov(edx, FieldOperand(eax, FixedArray::kHeaderSize + kPointerSize));
-  __ mov(Operand(esp, args->length() * kPointerSize), edx);
+  // Touch up the stack with the right values for the function and the
+  // receiver.  Use a scratch register to avoid destroying the result.
+  Result scratch = allocator_->Allocate();
+  ASSERT(scratch.is_valid());
+  __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
+  frame_->SetElementAt(arg_count + 1, &scratch);
+
+  // We can reuse the result register now.
+  frame_->Spill(result.reg());
+  __ mov(result.reg(),
+         FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
+  frame_->SetElementAt(arg_count, &result);
 
   // Call the function.
   CodeForSourcePosition(node->position());
+  CallFunctionStub call_function(arg_count);
+  result = frame_->CallStub(&call_function, arg_count + 1);
 
-  CallFunctionStub call_function(args->length());
-  __ CallStub(&call_function);
-
-  // Restore context and pop function from the stack.
-  __ mov(esi, frame_->Context());
-  __ mov(frame_->Top(), eax);
+  // Restore the context and overwrite the function on the stack with
+  // the result.
+  frame_->RestoreContextRegister();
+  frame_->SetElementAt(0, &result);
 }
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->Pop(eax);
-  __ test(eax, Immediate(kSmiTagMask));
-  cc_reg_ = zero;
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  value.Unuse();
+  destination()->Split(zero);
 }
 
 
@@ -3067,20 +4072,23 @@
   if (ShouldGenerateLog(args->at(0))) {
     Load(args->at(1));
     Load(args->at(2));
-    __ CallRuntime(Runtime::kLog, 2);
+    frame_->CallRuntime(Runtime::kLog, 2);
   }
 #endif
   // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(Immediate(Factory::undefined_value()));
+  frame_->Push(Factory::undefined_value());
 }
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->Pop(eax);
-  __ test(eax, Immediate(kSmiTagMask | 0x80000000));
-  cc_reg_ = zero;
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
+  value.Unuse();
+  destination()->Split(zero);
 }
 
 
@@ -3093,38 +4101,46 @@
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
-  Label slow_case;
-  Label end;
-  Label not_a_flat_string;
-  Label not_a_cons_string_either;
-  Label try_again_with_new_string;
-  Label ascii_string;
-  Label got_char_code;
+  JumpTarget slow_case(this);
+  JumpTarget end(this);
+  JumpTarget not_a_flat_string(this);
+  JumpTarget a_cons_string(this);
+  JumpTarget try_again_with_new_string(this, JumpTarget::BIDIRECTIONAL);
+  JumpTarget ascii_string(this);
+  JumpTarget got_char_code(this);
 
-  // Load the string into eax and the index into ebx.
   Load(args->at(0));
   Load(args->at(1));
-  frame_->Pop(ebx);
-  frame_->Pop(eax);
+  // Reserve register ecx to use as the shift amount later.
+  Result shift_amount = allocator()->Allocate(ecx);
+  ASSERT(shift_amount.is_valid());
+  Result index = frame_->Pop();
+  index.ToRegister();
+  Result object = frame_->Pop();
+  object.ToRegister();
   // If the receiver is a smi return undefined.
   ASSERT(kSmiTag == 0);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &slow_case, not_taken);
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  slow_case.Branch(zero, not_taken);
 
   // Check for negative or non-smi index.
   ASSERT(kSmiTag == 0);
-  __ test(ebx, Immediate(kSmiTagMask | 0x80000000));
-  __ j(not_zero, &slow_case, not_taken);
+  __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
+  slow_case.Branch(not_zero, not_taken);
   // Get rid of the smi tag on the index.
-  __ sar(ebx, kSmiTagSize);
+  frame_->Spill(index.reg());
+  __ sar(index.reg(), kSmiTagSize);
 
-  __ bind(&try_again_with_new_string);
-  // Get the type of the heap object into edi.
-  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(edi, FieldOperand(edx, Map::kInstanceTypeOffset));
+  try_again_with_new_string.Bind(&object, &index, &shift_amount);
+  // Get the type of the heap object.
+  Result object_type = allocator()->Allocate();
+  ASSERT(object_type.is_valid());
+  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(object_type.reg(),
+             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
   // We don't handle non-strings.
-  __ test(edi, Immediate(kIsNotStringMask));
-  __ j(not_zero, &slow_case, not_taken);
+  __ test(object_type.reg(), Immediate(kIsNotStringMask));
+  slow_case.Branch(not_zero, not_taken);
 
   // Here we make assumptions about the tag values and the shifts needed.
   // See the comment in objects.h.
@@ -3133,157 +4149,187 @@
          String::kMediumLengthShift);
   ASSERT(kShortStringTag + String::kLongLengthShift ==
          String::kShortLengthShift);
-  __ mov(ecx, Operand(edi));
-  __ and_(ecx, kStringSizeMask);
-  __ add(Operand(ecx), Immediate(String::kLongLengthShift));
-  // Get the length field.
-  __ mov(edx, FieldOperand(eax, String::kLengthOffset));
-  __ shr(edx);  // ecx is implicit operand.
-  // edx is now the length of the string.
-
+  __ mov(shift_amount.reg(), Operand(object_type.reg()));
+  __ and_(shift_amount.reg(), kStringSizeMask);
+  __ add(Operand(shift_amount.reg()), Immediate(String::kLongLengthShift));
+  // Get the length field. Temporary register now used for length.
+  Result length = object_type;
+  __ mov(length.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+  __ shr(length.reg());  // shift_amount, in ecx, is implicit operand.
   // Check for index out of range.
-  __ cmp(ebx, Operand(edx));
-  __ j(greater_equal, &slow_case, not_taken);
+  __ cmp(index.reg(), Operand(length.reg()));
+  slow_case.Branch(greater_equal, not_taken);
+  length.Unuse();
+  // Load the object type into object_type again.
+  // These two instructions are duplicated from above, to save a register.
+  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(object_type.reg(),
+             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
 
   // We need special handling for non-flat strings.
   ASSERT(kSeqStringTag == 0);
-  __ test(edi, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &not_a_flat_string, not_taken);
-
+  __ test(object_type.reg(), Immediate(kStringRepresentationMask));
+  not_a_flat_string.Branch(not_zero, &object, &index, &object_type,
+                           &shift_amount, not_taken);
+  shift_amount.Unuse();
   // Check for 1-byte or 2-byte string.
-  __ test(edi, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, taken);
+  __ test(object_type.reg(), Immediate(kStringEncodingMask));
+  ascii_string.Branch(not_zero, &object, &index, &object_type, taken);
 
   // 2-byte string.
   // Load the 2-byte character code.
-  __ movzx_w(eax,
-             FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
+  __ movzx_w(object_type.reg(), FieldOperand(object.reg(),
+                                             index.reg(),
+                                             times_2,
+                                             SeqTwoByteString::kHeaderSize));
+  object.Unuse();
+  index.Unuse();
+  got_char_code.Jump(&object_type);
 
   // ASCII string.
-  __ bind(&ascii_string);
+  ascii_string.Bind(&object, &index, &object_type);
   // Load the byte.
-  __ movzx_b(eax, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
-
-  __ bind(&got_char_code);
+  __ movzx_b(object_type.reg(), FieldOperand(object.reg(),
+                                             index.reg(),
+                                             times_1,
+                                             SeqAsciiString::kHeaderSize));
+  object.Unuse();
+  index.Unuse();
+  got_char_code.Bind(&object_type);
   ASSERT(kSmiTag == 0);
-  __ shl(eax, kSmiTagSize);
-  frame_->Push(eax);
-  __ jmp(&end);
+  __ shl(object_type.reg(), kSmiTagSize);
+  frame_->Push(&object_type);
+  end.Jump();
 
   // Handle non-flat strings.
-  __ bind(&not_a_flat_string);
-  __ and_(edi, kStringRepresentationMask);
-  __ cmp(edi, kConsStringTag);
-  __ j(not_equal, &not_a_cons_string_either, not_taken);
-
-  // ConsString.
-  // Get the first of the two strings.
-  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
-  __ jmp(&try_again_with_new_string);
-
-  __ bind(&not_a_cons_string_either);
-  __ cmp(edi, kSlicedStringTag);
-  __ j(not_equal, &slow_case, not_taken);
+  not_a_flat_string.Bind(&object, &index, &object_type, &shift_amount);
+  __ and_(object_type.reg(), kStringRepresentationMask);
+  __ cmp(object_type.reg(), kConsStringTag);
+  a_cons_string.Branch(equal, &object, &index, &shift_amount, taken);
+  __ cmp(object_type.reg(), kSlicedStringTag);
+  slow_case.Branch(not_equal, not_taken);
+  object_type.Unuse();
 
   // SlicedString.
   // Add the offset to the index.
-  __ add(ebx, FieldOperand(eax, SlicedString::kStartOffset));
-  __ j(overflow, &slow_case);
-  // Get the underlying string.
-  __ mov(eax, FieldOperand(eax, SlicedString::kBufferOffset));
-  __ jmp(&try_again_with_new_string);
+  __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+  slow_case.Branch(overflow);
+  // Getting the underlying string is done by running the cons string code.
 
-  __ bind(&slow_case);
-  frame_->Push(Immediate(Factory::undefined_value()));
+  // ConsString.
+  a_cons_string.Bind(&object, &index, &shift_amount);
+  // Get the first of the two strings.
+  frame_->Spill(object.reg());
+  // Both sliced and cons strings store their source string at the same place.
+  ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+  __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+  try_again_with_new_string.Jump(&object, &index, &shift_amount);
 
-  __ bind(&end);
+  // No results live at this point.
+  slow_case.Bind();
+  frame_->Push(Factory::undefined_value());
+  end.Bind();
 }
 
 
 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Load(args->at(0));
-  Label answer;
-  // We need the CC bits to come out as not_equal in the case where the
-  // object is a smi.  This can't be done with the usual test opcode so
-  // we copy the object to ecx and do some destructive ops on it that
-  // result in the right CC bits.
-  frame_->Pop(eax);
-  __ mov(ecx, Operand(eax));
-  __ and_(ecx, kSmiTagMask);
-  __ xor_(ecx, kSmiTagMask);
-  __ j(not_equal, &answer, not_taken);
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  destination()->false_target()->Branch(equal);
   // It is a heap object - get map.
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
   // Check if the object is a JS array or not.
-  __ cmp(eax, JS_ARRAY_TYPE);
-  __ bind(&answer);
-  cc_reg_ = equal;
+  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
+  value.Unuse();
+  temp.Unuse();
+  destination()->Split(equal);
 }
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-
-  // Seed the result with the formal parameters count, which will be
-  // used in case no arguments adaptor frame is found below the
-  // current frame.
-  __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
-
+  Result initial_value = allocator()->Allocate(eax);
+  ASSERT(initial_value.is_valid());
+  __ Set(initial_value.reg(),
+         Immediate(Smi::FromInt(scope_->num_parameters())));
+  // ArgumentsAccessStub takes the parameter count as an input argument
+  // in register eax.
   // Call the shared stub to get to the arguments.length.
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
-  __ CallStub(&stub);
-  frame_->Push(eax);
+  Result result = frame_->CallStub(&stub, &initial_value, 0);
+  frame_->Push(&result);
 }
 
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
-  Label leave;
+  JumpTarget leave(this);
   Load(args->at(0));  // Load the object.
-  __ mov(eax, frame_->Top());
+  frame_->Dup();
+  Result object = frame_->Pop();
+  object.ToRegister();
+  ASSERT(object.is_valid());
   // if (object->IsSmi()) return object.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &leave, taken);
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, taken);
   // It is a heap object - get map.
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
   // if (!object->IsJSValue()) return object.
-  __ cmp(ecx, JS_VALUE_TYPE);
-  __ j(not_equal, &leave, not_taken);
-  __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-  __ mov(frame_->Top(), eax);
-  __ bind(&leave);
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
+  leave.Branch(not_equal, not_taken);
+  __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
+  object.Unuse();
+  frame_->SetElementAt(0, &temp);
+  leave.Bind();
 }
 
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
-  Label leave;
+  JumpTarget leave(this);
   Load(args->at(0));  // Load the object.
   Load(args->at(1));  // Load the value.
-  __ mov(eax, frame_->Element(1));
-  __ mov(ecx, frame_->Top());
-  // if (object->IsSmi()) return object.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &leave, taken);
-  // It is a heap object - get map.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  // if (!object->IsJSValue()) return object.
-  __ cmp(ebx, JS_VALUE_TYPE);
-  __ j(not_equal, &leave, not_taken);
+  Result value = frame_->Pop();
+  Result object = frame_->Pop();
+  value.ToRegister();
+  object.ToRegister();
+
+  // if (object->IsSmi()) return value.
+  __ test(object.reg(), Immediate(kSmiTagMask));
+  leave.Branch(zero, &value, taken);
+
+  // It is a heap object - get its map.
+  Result scratch = allocator_->Allocate();
+  ASSERT(scratch.is_valid());
+  // if (!object->IsJSValue()) return value.
+  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
+  leave.Branch(not_equal, &value, not_taken);
+
   // Store the value.
-  __ mov(FieldOperand(eax, JSValue::kValueOffset), ecx);
-  // Update the write barrier.
-  __ RecordWrite(eax, JSValue::kValueOffset, ecx, ebx);
+  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  Result duplicate_value = allocator_->Allocate();
+  ASSERT(duplicate_value.is_valid());
+  __ mov(duplicate_value.reg(), value.reg());
+  // The object register is also overwritten by the write barrier and
+  // possibly aliased in the frame.
+  frame_->Spill(object.reg());
+  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
+                 scratch.reg());
+  object.Unuse();
+  scratch.Unuse();
+  duplicate_value.Unuse();
+
   // Leave.
-  __ bind(&leave);
-  __ mov(ecx, frame_->Top());
-  frame_->Pop();
-  __ mov(frame_->Top(), ecx);
+  leave.Bind(&value);
+  frame_->Push(&value);
 }
 
 
@@ -3293,12 +4339,15 @@
   // Load the key onto the stack and set register eax to the formal
   // parameters count for the currently executing function.
   Load(args->at(0));
-  __ Set(eax, Immediate(Smi::FromInt(scope_->num_parameters())));
+  Result parameters_count = allocator()->Allocate(eax);
+  ASSERT(parameters_count.is_valid());
+  __ Set(parameters_count.reg(),
+         Immediate(Smi::FromInt(scope_->num_parameters())));
 
   // Call the shared stub to get to arguments[key].
   ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  __ mov(frame_->Top(), eax);
+  Result result = frame_->CallStub(&stub, &parameters_count, 0);
+  frame_->SetElementAt(0, &result);
 }
 
 
@@ -3308,15 +4357,21 @@
   // Load the two objects into registers and perform the comparison.
   Load(args->at(0));
   Load(args->at(1));
-  frame_->Pop(eax);
-  frame_->Pop(ecx);
-  __ cmp(eax, Operand(ecx));
-  cc_reg_ = equal;
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+  right.ToRegister();
+  left.ToRegister();
+  __ cmp(right.reg(), Operand(left.reg()));
+  right.Unuse();
+  left.Unuse();
+  destination()->Split(equal);
 }
 
 
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  if (CheckForInlineRuntimeCall(node)) return;
+  if (CheckForInlineRuntimeCall(node)) {
+    return;
+  }
 
   ZoneList<Expression*>* args = node->arguments();
   Comment cmnt(masm_, "[ CallRuntime");
@@ -3324,48 +4379,63 @@
 
   if (function == NULL) {
     // Prepare stack for calling JS runtime function.
-    frame_->Push(Immediate(node->name()));
+    frame_->Push(node->name());
     // Push the builtins object found in the current global object.
-    __ mov(edx, GlobalObject());
-    frame_->Push(FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), GlobalObject());
+    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
+    frame_->Push(&temp);
   }
 
   // Push the arguments ("left-to-right").
-  for (int i = 0; i < args->length(); i++)
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
+  }
 
-  if (function != NULL) {
-    // Call the C runtime function.
-    __ CallRuntime(function, args->length());
-    frame_->Push(eax);
-  } else {
+  if (function == NULL) {
     // Call the JS runtime function.
-    Handle<Code> stub = ComputeCallInitialize(args->length());
-    __ Set(eax, Immediate(args->length()));
-    __ call(stub, RelocInfo::CODE_TARGET);
-    __ mov(esi, frame_->Context());
-    __ mov(frame_->Top(), eax);
+    Handle<Code> stub = ComputeCallInitialize(arg_count);
+
+    Result num_args = allocator()->Allocate(eax);
+    ASSERT(num_args.is_valid());
+    __ Set(num_args.reg(), Immediate(args->length()));
+    Result answer = frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET,
+                                           &num_args, arg_count + 1);
+    frame_->RestoreContextRegister();
+    frame_->SetElementAt(0, &answer);
+  } else {
+    // Call the C runtime function.
+    Result answer = frame_->CallRuntime(function, arg_count);
+    frame_->Push(&answer);
   }
 }
 
 
 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+  // Note that because of NOT and an optimization in comparison of a typeof
+  // expression to a literal string, this function can fail to leave a value
+  // on top of the frame or in the cc register.
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
 
   if (op == Token::NOT) {
-    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF,
-                  false_target(), true_target(), true);
-    cc_reg_ = NegateCondition(cc_reg_);
+    // Swap the true and false targets but keep the same actual label
+    // as the fall through.
+    destination()->Invert();
+    LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+    // Swap the labels back.
+    destination()->Invert();
 
   } else if (op == Token::DELETE) {
     Property* property = node->expression()->AsProperty();
     if (property != NULL) {
       Load(property->obj());
       Load(property->key());
-      __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-      frame_->Push(eax);
+      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+      frame_->Push(&answer);
       return;
     }
 
@@ -3374,40 +4444,41 @@
       Slot* slot = variable->slot();
       if (variable->is_global()) {
         LoadGlobal();
-        frame_->Push(Immediate(variable->name()));
-        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        frame_->Push(eax);
+        frame_->Push(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
         return;
 
       } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
         // lookup the context holding the named variable
         frame_->Push(esi);
-        frame_->Push(Immediate(variable->name()));
-        __ CallRuntime(Runtime::kLookupContext, 2);
-        // eax: context
-        frame_->Push(eax);
-        frame_->Push(Immediate(variable->name()));
-        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-        frame_->Push(eax);
+        frame_->Push(variable->name());
+        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
+        frame_->Push(&context);
+        frame_->Push(variable->name());
+        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
+                                              CALL_FUNCTION, 2);
+        frame_->Push(&answer);
         return;
       }
 
       // Default: Result of deleting non-global, not dynamically
       // introduced variables is false.
-      frame_->Push(Immediate(Factory::false_value()));
+      frame_->Push(Factory::false_value());
 
     } else {
       // Default: Result of deleting expressions is true.
       Load(node->expression());  // may have side-effects
-      __ Set(frame_->Top(), Immediate(Factory::true_value()));
+      frame_->SetElementAt(0, Factory::true_value());
     }
 
   } else if (op == Token::TYPEOF) {
     // Special case for loading the typeof expression; see comment on
     // LoadTypeofExpression().
     LoadTypeofExpression(node->expression());
-    __ CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(eax);
+    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
+    frame_->Push(&answer);
 
   } else if (op == Token::VOID) {
     Expression* expression = node->expression();
@@ -3420,10 +4491,10 @@
         expression->AsLiteral()->IsNull())) {
       // Omit evaluating the value of the primitive literal.
       // It will be discarded anyway, and can have no side effect.
-      frame_->Push(Immediate(Factory::undefined_value()));
+      frame_->Push(Factory::undefined_value());
     } else {
       Load(node->expression());
-      __ mov(frame_->Top(), Factory::undefined_value());
+      frame_->SetElementAt(0, Factory::undefined_value());
     }
 
   } else {
@@ -3438,44 +4509,51 @@
       case Token::SUB: {
         UnarySubStub stub;
         // TODO(1222589): remove dependency of TOS being cached inside stub
-        frame_->Pop(eax);
-        __ CallStub(&stub);
-        frame_->Push(eax);
+        Result operand = frame_->Pop();
+        operand.ToRegister(eax);
+        Result answer = frame_->CallStub(&stub, &operand, 0);
+        frame_->Push(&answer);
         break;
       }
 
       case Token::BIT_NOT: {
         // Smi check.
-        Label smi_label;
-        Label continue_label;
-        frame_->Pop(eax);
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(zero, &smi_label, taken);
+        JumpTarget smi_label(this);
+        JumpTarget continue_label(this);
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        smi_label.Branch(zero, &operand, taken);
 
-        frame_->Push(eax);  // undo popping of TOS
-        __ InvokeBuiltin(Builtins::BIT_NOT, CALL_FUNCTION);
+        frame_->Push(&operand);  // undo popping of TOS
+        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
+                                              CALL_FUNCTION, 1);
 
-        __ jmp(&continue_label);
-        __ bind(&smi_label);
-        __ not_(eax);
-        __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
-        __ bind(&continue_label);
-        frame_->Push(eax);
+        continue_label.Jump(&answer);
+        smi_label.Bind(&answer);
+        answer.ToRegister();
+        frame_->Spill(answer.reg());
+        __ not_(answer.reg());
+        __ and_(answer.reg(), ~kSmiTagMask);  // Remove inverted smi-tag.
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
         break;
       }
 
       case Token::ADD: {
         // Smi check.
-        Label continue_label;
-        frame_->Pop(eax);
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(zero, &continue_label);
+        JumpTarget continue_label(this);
+        Result operand = frame_->Pop();
+        operand.ToRegister();
+        __ test(operand.reg(), Immediate(kSmiTagMask));
+        continue_label.Branch(zero, &operand, taken);
 
-        frame_->Push(eax);
-        __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+        frame_->Push(&operand);
+        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+                                              CALL_FUNCTION, 1);
 
-        __ bind(&continue_label);
-        frame_->Push(eax);
+        continue_label.Bind(&answer);
+        frame_->Push(&answer);
         break;
       }
 
@@ -3486,9 +4564,9 @@
 }
 
 
-class CountOperationDeferred: public DeferredCode {
+class DeferredCountOperation: public DeferredCode {
  public:
-  CountOperationDeferred(CodeGenerator* generator,
+  DeferredCountOperation(CodeGenerator* generator,
                          bool is_postfix,
                          bool is_increment,
                          int result_offset)
@@ -3496,7 +4574,7 @@
         is_postfix_(is_postfix),
         is_increment_(is_increment),
         result_offset_(result_offset) {
-    set_comment("[ CountOperationDeferred");
+    set_comment("[ DeferredCountOperation");
   }
 
   virtual void Generate();
@@ -3561,13 +4639,21 @@
 };
 
 
-void CountOperationDeferred::Generate() {
+void DeferredCountOperation::Generate() {
+  CodeGenerator* cgen = generator();
+
+  Result value(cgen);
+  enter()->Bind(&value);
+  value.ToRegister(eax);  // The stubs below expect their argument in eax.
+
   if (is_postfix_) {
     RevertToNumberStub to_number_stub(is_increment_);
-    __ CallStub(&to_number_stub);
+    value = generator()->frame()->CallStub(&to_number_stub, &value, 0);
   }
+
   CounterOpStub stub(result_offset_, is_postfix_, is_increment_);
-  __ CallStub(&stub);
+  value = generator()->frame()->CallStub(&stub, &value, 0);
+  exit_.Jump(&value);
 }
 
 
@@ -3582,52 +4668,95 @@
 
   // Postfix: Make room for the result.
   if (is_postfix) {
-    frame_->Push(Immediate(0));
+    frame_->Push(Smi::FromInt(0));
   }
 
   { Reference target(this, node->expression());
-    if (target.is_illegal()) return;
-    target.GetValue(NOT_INSIDE_TYPEOF);
+    if (target.is_illegal()) {
+      // Spoof the virtual frame to have the expected height (one higher
+      // than on entry).
+      if (!is_postfix) {
+        frame_->Push(Smi::FromInt(0));
+      }
+      return;
+    }
+    target.TakeValue(NOT_INSIDE_TYPEOF);
 
-    CountOperationDeferred* deferred =
-        new CountOperationDeferred(this, is_postfix, is_increment,
+    DeferredCountOperation* deferred =
+        new DeferredCountOperation(this, is_postfix, is_increment,
                                    target.size() * kPointerSize);
 
-    frame_->Pop(eax);  // Load TOS into eax for calculations below
+    Result value = frame_->Pop();
+    value.ToRegister();
+    ASSERT(value.is_valid());
 
     // Postfix: Store the old value as the result.
     if (is_postfix) {
-      __ mov(frame_->Element(target.size()), eax);
+      Result old_value = value;
+      frame_->SetElementAt(target.size(), &old_value);
     }
 
-    // Perform optimistic increment/decrement.
+    // Perform optimistic increment/decrement.  Ensure the value is
+    // writable.
+    frame_->Spill(value.reg());
+    ASSERT(allocator_->count(value.reg()) == 1);
+
+    // In order to combine the overflow and the smi check, we need to
+    // be able to allocate a byte register.  We attempt to do so
+    // without spilling.  If we fail, we will generate separate
+    // overflow and smi checks.
+    //
+    // We need to allocate and clear the temporary byte register
+    // before performing the count operation since clearing the
+    // register using xor will clear the overflow flag.
+    Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
+    if (tmp.is_valid()) {
+      __ Set(tmp.reg(), Immediate(0));
+    }
+
     if (is_increment) {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
     } else {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
     }
 
     // If the count operation didn't overflow and the result is a
     // valid smi, we're done. Otherwise, we jump to the deferred
     // slow-case code.
-    __ j(overflow, deferred->enter(), not_taken);
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(not_zero, deferred->enter(), not_taken);
+    //
+    // We combine the overflow and the smi check if we could
+    // successfully allocate a temporary byte register.
+    if (tmp.is_valid()) {
+      __ setcc(overflow, tmp.reg());
+      __ or_(Operand(value.reg()), tmp.reg());
+      tmp.Unuse();
+      __ test(value.reg(), Immediate(kSmiTagMask));
+      deferred->enter()->Branch(not_zero, &value, not_taken);
+    } else {
+      deferred->enter()->Branch(overflow, &value, not_taken);
+      __ test(value.reg(), Immediate(kSmiTagMask));
+      deferred->enter()->Branch(not_zero, &value, not_taken);
+    }
 
     // Store the new value in the target if not const.
-    __ bind(deferred->exit());
-    frame_->Push(eax);  // Push the new value to TOS
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
+    deferred->BindExit(&value);
+    frame_->Push(&value);
+    if (!is_const) {
+      target.SetValue(NOT_CONST_INIT);
+    }
   }
 
   // Postfix: Discard the new value and use the old.
   if (is_postfix) {
-    frame_->Pop();
+    frame_->Drop();
   }
 }
 
 
 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  // Note that due to an optimization in comparison operations (typeof
+  // compared to a string literal), we can evaluate a binary expression such
+  // as AND or OR and not leave a value on the frame or in the cc register.
   Comment cmnt(masm_, "[ BinaryOperation");
   Token::Value op = node->op();
 
@@ -3636,83 +4765,133 @@
   // before any ToBoolean() conversions. This means that the value
   // produced by a && or || operator is not necessarily a boolean.
 
-  // NOTE: If the left hand side produces a materialized value (not in
-  // the CC register), we force the right hand side to do the
-  // same. This is necessary because we may have to branch to the exit
-  // after evaluating the left hand side (due to the shortcut
-  // semantics), but the compiler must (statically) know if the result
-  // of compiling the binary operation is materialized or not.
-
+  // NOTE: If the left hand side produces a materialized value (not
+  // control flow), we force the right hand side to do the same. This
+  // is necessary because we assume that if we get control flow on the
+  // last path out of an expression we got it on all paths.
   if (op == Token::AND) {
-    Label is_true;
-    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &is_true,
-                  false_target(), false);
-    if (has_cc()) {
-      Branch(false, false_target());
+    JumpTarget is_true(this);
+    ControlDestination dest(&is_true, destination()->false_target(), true);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
 
-      // Evaluate right side expression.
-      __ bind(&is_true);
-      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
-                    false_target(), false);
+    if (dest.false_was_fall_through()) {
+      // The current false target was used as the fall-through.  If
+      // there are no dangling jumps to is_true then the left
+      // subexpression was unconditionally false.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_true.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current false target was a forward jump then we have a
+        // valid frame, we have just bound the false target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->false_target()->Unuse();
+          destination()->false_target()->Jump();
+        }
+        is_true.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have actually just jumped to or bound the current false
+        // target but the current control destination is not marked as
+        // used.
+        destination()->Use(false);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_true
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
 
     } else {
-      Label pop_and_continue, exit;
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_true
+      // from nested subexpressions.
+      JumpTarget pop_and_continue(this);
+      JumpTarget exit(this);
 
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
-       // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
-      __ mov(eax, frame_->Top());
-      frame_->Push(eax);
-      ToBoolean(&pop_and_continue, &exit);
-      Branch(false, &exit);
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&pop_and_continue, &exit, true);
+      ToBoolean(&dest);
 
       // Pop the result of evaluating the first part.
-      __ bind(&pop_and_continue);
-      frame_->Pop();
+      frame_->Drop();
 
-      // Evaluate right side expression.
-      __ bind(&is_true);
+      // Compile right side expression.
+      is_true.Bind();
       Load(node->right());
 
       // Exit (always with a materialized value).
-      __ bind(&exit);
+      exit.Bind();
     }
 
   } else if (op == Token::OR) {
-    Label is_false;
-    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, true_target(),
-                  &is_false, false);
-    if (has_cc()) {
-      Branch(true, true_target());
+    JumpTarget is_false(this);
+    ControlDestination dest(destination()->true_target(), &is_false, false);
+    LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
 
-      // Evaluate right side expression.
-      __ bind(&is_false);
-      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
-                    false_target(), false);
+    if (dest.true_was_fall_through()) {
+      // The current true target was used as the fall-through.  If
+      // there are no dangling jumps to is_false then the left
+      // subexpression was unconditionally true.  Otherwise we have
+      // paths where we do have to evaluate the right subexpression.
+      if (is_false.is_linked()) {
+        // We need to compile the right subexpression.  If the jump to
+        // the current true target was a forward jump then we have a
+        // valid frame, we have just bound the true target, and we
+        // have to jump around the code for the right subexpression.
+        if (has_valid_frame()) {
+          destination()->true_target()->Unuse();
+          destination()->true_target()->Jump();
+        }
+        is_false.Bind();
+        // The left subexpression compiled to control flow, so the
+        // right one is free to do so as well.
+        LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+      } else {
+        // We have just jumped to or bound the current true target but
+        // the current control destination is not marked as used.
+        destination()->Use(true);
+      }
+
+    } else if (dest.is_used()) {
+      // The left subexpression compiled to control flow (and is_false
+      // was just bound), so the right is free to do so as well.
+      LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
 
     } else {
-      Label pop_and_continue, exit;
+      // We have a materialized value on the frame, so we exit with
+      // one on all paths.  There are possibly also jumps to is_false
+      // from nested subexpressions.
+      JumpTarget pop_and_continue(this);
+      JumpTarget exit(this);
 
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
-      // Duplicate the TOS value. The duplicate will be popped by ToBoolean.
-      __ mov(eax, frame_->Top());
-      frame_->Push(eax);
-      ToBoolean(&exit, &pop_and_continue);
-      Branch(true, &exit);
+      //
+      // Duplicate the TOS value. The duplicate will be popped by
+      // ToBoolean.
+      frame_->Dup();
+      ControlDestination dest(&exit, &pop_and_continue, false);
+      ToBoolean(&dest);
 
       // Pop the result of evaluating the first part.
-      __ bind(&pop_and_continue);
-      frame_->Pop();
+      frame_->Drop();
 
-      // Evaluate right side expression.
-      __ bind(&is_false);
+      // Compile right side expression.
+      is_false.Bind();
       Load(node->right());
 
       // Exit (always with a materialized value).
-      __ bind(&exit);
+      exit.Bind();
     }
 
   } else {
@@ -3750,7 +4929,7 @@
 
 
 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  frame_->Push(frame_->Function());
+  frame_->PushFunction();
 }
 
 
@@ -3773,45 +4952,6 @@
   Expression* left = node->left();
   Expression* right = node->right();
   Token::Value op = node->op();
-
-  // To make null checks efficient, we check if either left or right is the
-  // literal 'null'. If so, we optimize the code by inlining a null check
-  // instead of calling the (very) general runtime routine for checking
-  // equality.
-  if (op == Token::EQ || op == Token::EQ_STRICT) {
-    bool left_is_null =
-        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
-    bool right_is_null =
-        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
-    // The 'null' value can only be equal to 'null' or 'undefined'.
-    if (left_is_null || right_is_null) {
-      Load(left_is_null ? right : left);
-      frame_->Pop(eax);
-      __ cmp(eax, Factory::null_value());
-
-      // The 'null' value is only equal to 'undefined' if using non-strict
-      // comparisons.
-      if (op != Token::EQ_STRICT) {
-        __ j(equal, true_target());
-
-        __ cmp(eax, Factory::undefined_value());
-        __ j(equal, true_target());
-
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(equal, false_target());
-
-        // It can be an undetectable object.
-        __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-        __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
-        __ and_(eax, 1 << Map::kIsUndetectable);
-        __ cmp(eax, 1 << Map::kIsUndetectable);
-      }
-
-      cc_reg_ = equal;
-      return;
-    }
-  }
-
   // To make typeof testing for natives implemented in JavaScript really
   // efficient, we generate special code for expressions of the form:
   // 'typeof <expression> == <string>'.
@@ -3822,86 +4962,96 @@
        right->AsLiteral()->handle()->IsString())) {
     Handle<String> check(String::cast(*right->AsLiteral()->handle()));
 
-    // Load the operand and move it to register edx.
+    // Load the operand and move it to a register.
     LoadTypeofExpression(operation->expression());
-    frame_->Pop(edx);
+    Result answer = frame_->Pop();
+    answer.ToRegister();
 
     if (check->Equals(Heap::number_symbol())) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(zero, true_target());
-      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ cmp(edx, Factory::heap_number_map());
-      cc_reg_ = equal;
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->true_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ cmp(answer.reg(), Factory::heap_number_map());
+      answer.Unuse();
+      destination()->Split(equal);
 
     } else if (check->Equals(Heap::string_symbol())) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(zero, false_target());
-
-      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
 
       // It can be an undetectable string object.
-      __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
-      __ and_(ecx, 1 << Map::kIsUndetectable);
-      __ cmp(ecx, 1 << Map::kIsUndetectable);
-      __ j(equal, false_target());
-
-      __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-      __ cmp(ecx, FIRST_NONSTRING_TYPE);
-      cc_reg_ = less;
+      Result temp = allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
+      __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+      __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
+      temp.Unuse();
+      answer.Unuse();
+      destination()->Split(less);
 
     } else if (check->Equals(Heap::boolean_symbol())) {
-      __ cmp(edx, Factory::true_value());
-      __ j(equal, true_target());
-      __ cmp(edx, Factory::false_value());
-      cc_reg_ = equal;
+      __ cmp(answer.reg(), Factory::true_value());
+      destination()->true_target()->Branch(equal);
+      __ cmp(answer.reg(), Factory::false_value());
+      answer.Unuse();
+      destination()->Split(equal);
 
     } else if (check->Equals(Heap::undefined_symbol())) {
-      __ cmp(edx, Factory::undefined_value());
-      __ j(equal, true_target());
+      __ cmp(answer.reg(), Factory::undefined_value());
+      destination()->true_target()->Branch(equal);
 
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(zero, false_target());
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
 
       // It can be an undetectable object.
-      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
-      __ and_(ecx, 1 << Map::kIsUndetectable);
-      __ cmp(ecx, 1 << Map::kIsUndetectable);
-
-      cc_reg_ = equal;
+      frame_->Spill(answer.reg());
+      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(answer.reg(),
+                 FieldOperand(answer.reg(), Map::kBitFieldOffset));
+      __ test(answer.reg(), Immediate(1 << Map::kIsUndetectable));
+      answer.Unuse();
+      destination()->Split(not_zero);
 
     } else if (check->Equals(Heap::function_symbol())) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(zero, false_target());
-      __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
-      __ cmp(edx, JS_FUNCTION_TYPE);
-      cc_reg_ = equal;
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      frame_->Spill(answer.reg());
+      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+      answer.Unuse();
+      destination()->Split(equal);
 
     } else if (check->Equals(Heap::object_symbol())) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(zero, false_target());
-
-      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ cmp(edx, Factory::null_value());
-      __ j(equal, true_target());
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      destination()->false_target()->Branch(zero);
+      __ cmp(answer.reg(), Factory::null_value());
+      destination()->true_target()->Branch(equal);
 
       // It can be an undetectable object.
-      __ movzx_b(edx, FieldOperand(ecx, Map::kBitFieldOffset));
-      __ and_(edx, 1 << Map::kIsUndetectable);
-      __ cmp(edx, 1 << Map::kIsUndetectable);
-      __ j(equal, false_target());
-
-      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-      __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-      __ j(less, false_target());
-      __ cmp(ecx, LAST_JS_OBJECT_TYPE);
-      cc_reg_ = less_equal;
-
+      Result map = allocator()->Allocate();
+      ASSERT(map.is_valid());
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+      __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+      destination()->false_target()->Branch(not_zero);
+      __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+      __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+      destination()->false_target()->Branch(less);
+      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+      answer.Unuse();
+      map.Unuse();
+      destination()->Split(less_equal);
     } else {
       // Uncommon case: typeof testing against a string literal that is
       // never returned from the typeof operator.
-      __ jmp(false_target());
+      answer.Unuse();
+      destination()->Goto(false);
     }
     return;
   }
@@ -3930,42 +5080,41 @@
     case Token::IN: {
       Load(left);
       Load(right);
-      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      frame_->Push(eax);  // push the result
+      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
+      frame_->Push(&answer);  // push the result
       return;
     }
     case Token::INSTANCEOF: {
       Load(left);
       Load(right);
       InstanceofStub stub;
-      __ CallStub(&stub);
-      __ test(eax, Operand(eax));
-      cc_reg_ = zero;
+      Result answer = frame_->CallStub(&stub, 2);
+      answer.ToRegister();
+      __ test(answer.reg(), Operand(answer.reg()));
+      answer.Unuse();
+      destination()->Split(zero);
       return;
     }
     default:
       UNREACHABLE();
   }
-
-  // Optimize for the case where (at least) one of the expressions
-  // is a literal small integer.
-  if (IsInlineSmi(left->AsLiteral())) {
-    Load(right);
-    SmiComparison(ReverseCondition(cc), left->AsLiteral()->handle(), strict);
-    return;
-  }
-  if (IsInlineSmi(right->AsLiteral())) {
-    Load(left);
-    SmiComparison(cc, right->AsLiteral()->handle(), strict);
-    return;
-  }
-
   Load(left);
   Load(right);
-  Comparison(cc, strict);
+  Comparison(cc, strict, destination());
 }
 
 
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() {
+  return (allocator()->count(eax) == frame()->register_count(eax))
+      && (allocator()->count(ebx) == frame()->register_count(ebx))
+      && (allocator()->count(ecx) == frame()->register_count(ecx))
+      && (allocator()->count(edx) == frame()->register_count(edx))
+      && (allocator()->count(edi) == frame()->register_count(edi));
+}
+#endif
+
+
 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
   DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
@@ -3973,23 +5122,7 @@
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
-  virtual void Generate() {
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-    // Calculate the delta from the IC call instruction to the map
-    // check cmp instruction in the inlined version.  This delta is
-    // stored in a test(eax, delta) instruction after the call so that
-    // we can find it in the IC initialization code and patch the cmp
-    // instruction.  This means that we cannot allow test instructions
-    // after calls to KeyedLoadIC stubs in other places.
-    int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
-    if (is_global_) {
-      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    } else {
-      __ call(ic, RelocInfo::CODE_TARGET);
-    }
-    __ test(eax, Immediate(-delta_to_patch_site));
-    __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
-  }
+  virtual void Generate();
 
   Label* patch_site() { return &patch_site_; }
 
@@ -3999,6 +5132,48 @@
 };
 
 
+void DeferredReferenceGetKeyedValue::Generate() {
+  CodeGenerator* cgen = generator();
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  Result receiver(cgen);
+  Result key(cgen);
+  enter()->Bind(&receiver, &key);
+  cgen->frame()->Push(&receiver);  // First IC argument.
+  cgen->frame()->Push(&key);       // Second IC argument.
+
+  // Calculate the delta from the IC call instruction to the map check
+  // cmp instruction in the inlined version.  This delta is stored in
+  // a test(eax, delta) instruction after the call so that we can find
+  // it in the IC initialization code and patch the cmp instruction.
+  // This means that we cannot allow test instructions after calls to
+  // KeyedLoadIC stubs in other places.
+  Result value(cgen);
+  if (is_global_) {
+    value = cgen->frame()->CallCodeObject(ic,
+                                          RelocInfo::CODE_TARGET_CONTEXT,
+                                          0);
+  } else {
+    value = cgen->frame()->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+  }
+  // The result needs to be specifically the eax register because the
+  // offset to the patch site will be expected in a test eax
+  // instruction.
+  ASSERT(value.is_register() && value.reg().is(eax));
+  // The delta from the start of the map-compare instruction to the
+  // test eax instruction.
+  int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
+  __ test(value.reg(), Immediate(-delta_to_patch_site));
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+  // The receiver and key were spilled by the call, so their state as
+  // constants or copies has been changed.  Thus, they need to be
+  // "mergable" in the block at the exit label and are therefore
+  // passed as return results here.
+  key = cgen->frame()->Pop();
+  receiver = cgen->frame()->Pop();
+  exit_.Jump(&receiver, &key, &value);
+}
+
 
 #undef __
 #define __ masm->
@@ -4021,10 +5196,10 @@
 
 
 void Reference::GetValue(TypeofState typeof_state) {
+  ASSERT(!cgen_->in_spilled_code());
+  ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
   MacroAssembler* masm = cgen_->masm();
-  VirtualFrame* frame = cgen_->frame();
   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
@@ -4041,27 +5216,28 @@
       // thrown below, we must distinguish between the two kinds of
       // loads (typeof expression loads must not throw a reference
       // error).
+      VirtualFrame* frame = cgen_->frame();
       Comment cmnt(masm, "[ Load from named Property");
       Handle<String> name(GetName());
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
       // Setup the name register.
-      __ mov(ecx, name);
-      if (var != NULL) {
-        ASSERT(var->is_global());
-        __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-      } else {
-        __ call(ic, RelocInfo::CODE_TARGET);
-      }
-      // Push the result.
-      frame->Push(eax);
+      Result name_reg = cgen_->allocator()->Allocate(ecx);
+      ASSERT(name_reg.is_valid());
+      __ mov(name_reg.reg(), name);
+      ASSERT(var == NULL || var->is_global());
+      RelocInfo::Mode rmode = (var == NULL)
+                            ? RelocInfo::CODE_TARGET
+                            : RelocInfo::CODE_TARGET_CONTEXT;
+      Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
+      frame->Push(&answer);
       break;
     }
 
     case KEYED: {
-      // TODO(1241834): Make sure that it is safe to ignore the
-      // distinction between expressions in a typeof and not in a
-      // typeof.
+      // TODO(1241834): Make sure that it is safe to ignore the
+      // distinction between expressions in a typeof and not in a typeof.
+      Comment cmnt(masm, "[ Load from keyed Property");
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
@@ -4073,59 +5249,92 @@
         Comment cmnt(masm, "[ Inlined array index load");
         DeferredReferenceGetKeyedValue* deferred =
             new DeferredReferenceGetKeyedValue(cgen_, is_global);
-        // Load receiver and check that it is not a smi (only needed
-        // if this is not a load from the global context) and that it
-        // has the expected map.
-        __ mov(edx, Operand(esp, kPointerSize));
+
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        // Check that the receiver is not a smi (only needed if this
+        // is not a load from the global context) and that it has the
+        // expected map.
         if (!is_global) {
-          __ test(edx, Immediate(kSmiTagMask));
-          __ j(zero, deferred->enter(), not_taken);
+          __ test(receiver.reg(), Immediate(kSmiTagMask));
+          deferred->enter()->Branch(zero, &receiver, &key, not_taken);
         }
+
         // Initially, use an invalid map. The map is patched in the IC
         // initialization code.
         __ bind(deferred->patch_site());
-        __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+        __ cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
                Immediate(Factory::null_value()));
-        __ j(not_equal, deferred->enter(), not_taken);
-        // Load key and check that it is a smi.
-        __ mov(eax, Operand(esp, 0));
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(not_zero, deferred->enter(), not_taken);
-        // Shift to get actual index value.
-        __ sar(eax, kSmiTagSize);
+        deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
+
+        // Check that the key is a smi.
+        __ test(key.reg(), Immediate(kSmiTagMask));
+        deferred->enter()->Branch(not_zero, &receiver, &key, not_taken);
+
         // Get the elements array from the receiver and check that it
         // is not a dictionary.
-        __ mov(edx, FieldOperand(edx, JSObject::kElementsOffset));
-        __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+        Result elements = cgen_->allocator()->Allocate();
+        ASSERT(elements.is_valid());
+        __ mov(elements.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
                Immediate(Factory::hash_table_map()));
-        __ j(equal, deferred->enter(), not_taken);
-        // Check that key is within bounds.
-        __ cmp(eax, FieldOperand(edx, Array::kLengthOffset));
-        __ j(above_equal, deferred->enter(), not_taken);
-        // Load and check that the result is not the hole.
-        __ mov(eax,
-               Operand(edx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
-        __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
-        __ j(equal, deferred->enter(), not_taken);
+        deferred->enter()->Branch(equal, &receiver, &key, not_taken);
+
+        // Shift the key to get the actual index value and check that
+        // it is within bounds.
+        Result index = cgen_->allocator()->Allocate();
+        ASSERT(index.is_valid());
+        __ mov(index.reg(), key.reg());
+        __ sar(index.reg(), kSmiTagSize);
+        __ cmp(index.reg(),
+               FieldOperand(elements.reg(), Array::kLengthOffset));
+        deferred->enter()->Branch(above_equal, &receiver, &key, not_taken);
+
+        // Load and check that the result is not the hole.  We could
+        // reuse the index or elements register for the value.
+        //
+        // TODO(206): Consider whether it makes sense to try some
+        // heuristic about which register to reuse.  For example, if
+        // one is eax, then we can reuse that one because the value
+        // coming from the deferred code will be in eax.
+        Result value = index;
+        __ mov(value.reg(), Operand(elements.reg(),
+                                    index.reg(),
+                                    times_4,
+                                    Array::kHeaderSize - kHeapObjectTag));
+        elements.Unuse();
+        index.Unuse();
+        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+        deferred->enter()->Branch(equal, &receiver, &key, not_taken);
         __ IncrementCounter(&Counters::keyed_load_inline, 1);
-        __ bind(deferred->exit());
+
+        // Restore the receiver and key to the frame and push the
+        // result on top of it.
+        deferred->BindExit(&receiver, &key, &value);
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+
       } else {
+        VirtualFrame* frame = cgen_->frame();
         Comment cmnt(masm, "[ Load from keyed Property");
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-        if (is_global) {
-          __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-        } else {
-          __ call(ic, RelocInfo::CODE_TARGET);
-        }
+        RelocInfo::Mode rmode = is_global
+                              ? RelocInfo::CODE_TARGET_CONTEXT
+                              : RelocInfo::CODE_TARGET;
+        Result answer = frame->CallCodeObject(ic, rmode, 0);
         // Make sure that we do not have a test instruction after the
         // call.  A test instruction after the call is used to
         // indicate that we have generated an inline version of the
         // keyed load.  The explicit nop instruction is here because
         // the push that follows might be peep-hole optimized away.
         __ nop();
+        frame->Push(&answer);
       }
-      // Push the result.
-      frame->Push(eax);
       break;
     }
 
@@ -4135,9 +5344,39 @@
 }
 
 
-void Reference::SetValue(InitState init_state) {
+void Reference::TakeValue(TypeofState typeof_state) {
+  // For non-constant frame-allocated slots, we invalidate the value in the
+  // slot.  For all others, we fall back on GetValue.
+  ASSERT(!cgen_->in_spilled_code());
   ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
+  if (type_ != SLOT) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP ||
+      slot->type() == Slot::CONTEXT ||
+      slot->var()->mode() == Variable::CONST) {
+    GetValue(typeof_state);
+    return;
+  }
+
+  // Only non-constant, frame-allocated parameters and locals can reach
+  // here.
+  if (slot->type() == Slot::PARAMETER) {
+    cgen_->frame()->TakeParameterAt(slot->index());
+  } else {
+    ASSERT(slot->type() == Slot::LOCAL);
+    cgen_->frame()->TakeLocalAt(slot->index());
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
   MacroAssembler* masm = cgen_->masm();
   VirtualFrame* frame = cgen_->frame();
   switch (type_) {
@@ -4145,74 +5384,7 @@
       Comment cmnt(masm, "[ Store to Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      if (slot->type() == Slot::LOOKUP) {
-        ASSERT(slot->var()->is_dynamic());
-
-        // For now, just do a runtime call.
-        frame->Push(esi);
-        frame->Push(Immediate(slot->var()->name()));
-
-        if (init_state == CONST_INIT) {
-          // Same as the case for a normal store, but ignores attribute
-          // (e.g. READ_ONLY) of context slot so that we can initialize
-          // const properties (introduced via eval("const foo = (some
-          // expr);")). Also, uses the current function context instead of
-          // the top context.
-          //
-          // Note that we must declare the foo upon entry of eval(), via a
-          // context slot declaration, but we cannot initialize it at the
-          // same time, because the const declaration may be at the end of
-          // the eval code (sigh...) and the const variable may have been
-          // used before (where its value is 'undefined'). Thus, we can only
-          // do the initialization when we actually encounter the expression
-          // and when the expression operands are defined and valid, and
-          // thus we need the split into 2 operations: declaration of the
-          // context slot followed by initialization.
-          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-        } else {
-          __ CallRuntime(Runtime::kStoreContextSlot, 3);
-        }
-        // Storing a variable must keep the (new) value on the expression
-        // stack. This is necessary for compiling chained assignment
-        // expressions.
-        frame->Push(eax);
-
-      } else {
-        ASSERT(!slot->var()->is_dynamic());
-
-        Label exit;
-        if (init_state == CONST_INIT) {
-          ASSERT(slot->var()->mode() == Variable::CONST);
-          // Only the first const initialization must be executed (the slot
-          // still contains 'the hole' value). When the assignment is
-          // executed, the code is identical to a normal store (see below).
-          Comment cmnt(masm, "[ Init const");
-          __ mov(eax, cgen_->SlotOperand(slot, ecx));
-          __ cmp(eax, Factory::the_hole_value());
-          __ j(not_equal, &exit);
-        }
-
-        // We must execute the store.  Storing a variable must keep the
-        // (new) value on the stack. This is necessary for compiling
-        // assignment expressions.
-        //
-        // Note: We will reach here even with slot->var()->mode() ==
-        // Variable::CONST because of const declarations which will
-        // initialize consts to 'the hole' value and by doing so, end up
-        // calling this code.
-        frame->Pop(eax);
-        __ mov(cgen_->SlotOperand(slot, ecx), eax);
-        frame->Push(eax);  // RecordWrite may destroy the value in eax.
-        if (slot->type() == Slot::CONTEXT) {
-          // ecx is loaded with context when calling SlotOperand above.
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          __ RecordWrite(ecx, offset, eax, ebx);
-        }
-        // If we definitely did not jump over the assignment, we do not need
-        // to bind the exit label.  Doing so can defeat peephole
-        // optimization.
-        if (init_state == CONST_INIT) __ bind(&exit);
-      }
+      cgen_->StoreToSlot(slot, init_state);
       break;
     }
 
@@ -4222,11 +5394,16 @@
       Handle<String> name(GetName());
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       // TODO(1222589): Make the IC grab the values from the stack.
-      frame->Pop(eax);
+      Result argument = frame->Pop();
+      argument.ToRegister(eax);
+      ASSERT(argument.is_valid());
+      Result property_name = cgen_->allocator()->Allocate(ecx);
+      ASSERT(property_name.is_valid());
       // Setup the name register.
-      __ mov(ecx, name);
-      __ call(ic, RelocInfo::CODE_TARGET);
-      frame->Push(eax);  // IC call leaves result in eax, push it out
+      __ mov(property_name.reg(), name);
+      Result answer = frame->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+                                            &argument, &property_name, 0);
+      frame->Push(&answer);
       break;
     }
 
@@ -4235,9 +5412,12 @@
       // Call IC code.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       // TODO(1222589): Make the IC grab the values from the stack.
-      frame->Pop(eax);
-      __ call(ic, RelocInfo::CODE_TARGET);
-      frame->Push(eax);  // IC call leaves result in eax, push it out
+      Result arg = frame->Pop();
+      arg.ToRegister(eax);
+      ASSERT(arg.is_valid());
+      Result answer = frame->CallCodeObject(ic, RelocInfo::CODE_TARGET,
+                                            &arg, 0);
+      frame->Push(&answer);
       break;
     }
 
@@ -4304,6 +5484,274 @@
 }
 
 
+#undef __
+#define __ masm_->
+
+Result DeferredInlineBinaryOperation::GenerateInlineCode() {
+  // Perform fast-case smi code for the operation (left <op> right) and
+  // returns the result in a Result.
+  // If any fast-case tests fail, it jumps to the slow-case deferred code,
+  // which calls the binary operation stub, with the arguments (in registers)
+  // on top of the frame.
+
+  VirtualFrame* frame = generator()->frame();
+  // If operation is division or modulus, ensure
+  // that the special registers needed are free.
+  Result reg_eax(generator());  // Valid only if op is DIV or MOD.
+  Result reg_edx(generator());  // Valid only if op is DIV or MOD.
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    reg_eax = generator()->allocator()->Allocate(eax);
+    ASSERT(reg_eax.is_valid());
+    reg_edx = generator()->allocator()->Allocate(edx);
+    ASSERT(reg_edx.is_valid());
+  }
+
+  Result right = frame->Pop();
+  Result left = frame->Pop();
+  left.ToRegister();
+  right.ToRegister();
+  // Answer is used to compute the answer, leaving left and right unchanged.
+  // It is also returned from this function.
+  // It is used as a temporary register in a few places, as well.
+  Result answer(generator());
+  if (reg_eax.is_valid()) {
+    answer = reg_eax;
+  } else {
+    answer = generator()->allocator()->Allocate();
+  }
+  ASSERT(answer.is_valid());
+  // Perform the smi check.
+  __ mov(answer.reg(), Operand(left.reg()));
+  __ or_(answer.reg(), Operand(right.reg()));
+  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
+  __ test(answer.reg(), Immediate(kSmiTagMask));
+  enter()->Branch(not_zero, &left, &right, not_taken);
+
+  // All operations start by copying the left argument into answer.
+  __ mov(answer.reg(), Operand(left.reg()));
+  switch (op_) {
+    case Token::ADD:
+      __ add(answer.reg(), Operand(right.reg()));  // add optimistically
+      enter()->Branch(overflow, &left, &right, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(answer.reg(), Operand(right.reg()));  // subtract optimistically
+      enter()->Branch(overflow, &left, &right, not_taken);
+      break;
+
+
+    case Token::MUL: {
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // adjust code below if not the case
+      // Remove tag from the left operand (but keep sign).
+      // Left hand operand has been copied into answer.
+      __ sar(answer.reg(), kSmiTagSize);
+      // Do multiplication of smis, leaving result in answer.
+      __ imul(answer.reg(), Operand(right.reg()));
+      // Go slow on overflows.
+      enter()->Branch(overflow, &left, &right, not_taken);
+      // Check for negative zero result.  If product is zero,
+      // and one argument is negative, go to slow case.
+      // The frame is unchanged in this block, so local control flow can
+      // use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(answer.reg(), Operand(answer.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ mov(answer.reg(), Operand(left.reg()));
+      __ or_(answer.reg(), Operand(right.reg()));
+      enter()->Branch(negative, &left, &right, not_taken);
+      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
+      __ bind(&non_zero_result);
+      break;
+    }
+
+    case Token::DIV: {
+      // Left hand argument has been copied into answer, which is eax.
+      // Sign extend eax into edx:eax.
+      __ cdq();
+      // Check for 0 divisor.
+      __ test(right.reg(), Operand(right.reg()));
+      enter()->Branch(zero, &left, &right, not_taken);
+      // Divide edx:eax by the right operand.
+      __ idiv(right.reg());
+      // Check for negative zero result.  If result is zero, and divisor
+      // is negative, return a floating point negative zero.
+      // The frame is unchanged in this block, so local control flow can
+      // use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(left.reg(), Operand(left.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ test(right.reg(), Operand(right.reg()));
+      enter()->Branch(negative, &left, &right, not_taken);
+      __ bind(&non_zero_result);
+      // Check for the corner case of dividing the most negative smi
+      // by -1. We cannot use the overflow flag, since it is not set
+      // by idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(reg_eax.reg(), 0x40000000);
+      enter()->Branch(equal, &left, &right, not_taken);
+      // Check that the remainder is zero.
+      __ test(reg_edx.reg(), Operand(reg_edx.reg()));
+      enter()->Branch(not_zero, &left, &right, not_taken);
+      // Tag the result and store it in answer.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag));
+      break;
+    }
+
+    case Token::MOD: {
+      // Left hand argument has been copied into answer, which is eax.
+      // Sign extend eax into edx:eax.
+      __ cdq();
+      // Check for 0 divisor.
+      __ test(right.reg(), Operand(right.reg()));
+      enter()->Branch(zero, &left, &right, not_taken);
+
+      // Divide edx:eax by the right operand.
+      __ idiv(right.reg());
+      // Check for negative zero result.  If result is zero, and divisor
+      // is negative, return a floating point negative zero.
+      // The frame is unchanged in this block, so local control flow can
+      // use a Label rather than a JumpTarget.
+      Label non_zero_result;
+      __ test(reg_edx.reg(), Operand(reg_edx.reg()));
+      __ j(not_zero, &non_zero_result, taken);
+      __ test(left.reg(), Operand(left.reg()));
+      enter()->Branch(negative, &left, &right, not_taken);
+      __ bind(&non_zero_result);
+      // The answer is in edx.
+      answer = reg_edx;
+      break;
+    }
+
+    case Token::BIT_OR:
+      __ or_(answer.reg(), Operand(right.reg()));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(answer.reg(), Operand(right.reg()));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(answer.reg(), Operand(right.reg()));
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move right into ecx.
+      // Left is in two registers already, so even if left or answer is ecx,
+      // we can move right to it, and use the other one.
+      // Right operand must be in register cl because x86 likes it that way.
+      if (right.reg().is(ecx)) {
+        // Right is already in the right place.  Left may be in the
+        // same register, which causes problems.  Use answer instead.
+        if (left.reg().is(ecx)) {
+          left = answer;
+        }
+      } else if (left.reg().is(ecx)) {
+        generator()->frame()->Spill(left.reg());
+        __ mov(left.reg(), Operand(right.reg()));
+        right = left;
+        left = answer;  // Use copy of left in answer as left.
+      } else if (answer.reg().is(ecx)) {
+        __ mov(answer.reg(), Operand(right.reg()));
+        right = answer;
+      } else {
+        Result reg_ecx = generator()->allocator()->Allocate(ecx);
+        ASSERT(reg_ecx.is_valid());
+        __ mov(reg_ecx.reg(), Operand(right.reg()));
+        right = reg_ecx;
+      }
+      ASSERT(left.reg().is_valid());
+      ASSERT(!left.reg().is(ecx));
+      ASSERT(right.reg().is(ecx));
+      answer.Unuse();  // Answer may now be being used for left or right.
+      // We will modify left and right, which we do not do in any other
+      // binary operation.  The exits to slow code need to restore the
+      // original values of left and right, or at least values that give
+      // the same answer.
+
+      // We are modifying left and right.  They must be spilled!
+      generator()->frame()->Spill(left.reg());
+      generator()->frame()->Spill(right.reg());
+
+      // Remove tags from operands (but keep sign).
+      __ sar(left.reg(), kSmiTagSize);
+      __ sar(ecx, kSmiTagSize);
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ sar(left.reg());
+          // No checks of result necessary
+          break;
+        case Token::SHR: {
+          __ shr(left.reg());
+          // Check that the *unsigned* result fits in a smi.
+          // Neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          // Smi tagging these two cases can only happen with shifts
+          // by 0 or 1 when handed a valid smi.
+          // If the answer cannot be represented by a SMI, restore
+          // the left and right arguments, and jump to slow case.
+          // The low bit of the left argument may be lost, but only
+          // in a case where it is dropped anyway.
+          JumpTarget result_ok(generator());
+          __ test(left.reg(), Immediate(0xc0000000));
+          result_ok.Branch(zero, &left, &right, taken);
+          __ shl(left.reg());
+          ASSERT(kSmiTag == 0);
+          __ shl(left.reg(), kSmiTagSize);
+          __ shl(right.reg(), kSmiTagSize);
+          enter()->Jump(&left, &right);
+          result_ok.Bind(&left, &right);
+          break;
+        }
+        case Token::SHL: {
+          __ shl(left.reg());
+          // Check that the *signed* result fits in a smi.
+          //
+          // TODO(207): Can reduce registers from 4 to 3 by
+          // preallocating ecx.
+          JumpTarget result_ok(generator());
+          Result smi_test_reg = generator()->allocator()->Allocate();
+          ASSERT(smi_test_reg.is_valid());
+          __ lea(smi_test_reg.reg(), Operand(left.reg(), 0x40000000));
+          __ test(smi_test_reg.reg(), Immediate(0x80000000));
+          smi_test_reg.Unuse();
+          result_ok.Branch(zero, &left, &right, taken);
+          __ shr(left.reg());
+          ASSERT(kSmiTag == 0);
+          __ shl(left.reg(), kSmiTagSize);
+          __ shl(right.reg(), kSmiTagSize);
+          enter()->Jump(&left, &right);
+          result_ok.Bind(&left, &right);
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      // Smi-tag the result, in left, and make answer an alias for left.
+      answer = left;
+      answer.ToRegister();
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(answer.reg(),
+             Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return answer;
+}
+
+
+#undef __
+#define __ masm->
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (eax <op> ebx) and
   // leave result in register eax.
@@ -5071,10 +6519,8 @@
   // Check that the function really is a JavaScript function.
   __ test(edi, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
-  // Get the map.
-  __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ cmp(ecx, JS_FUNCTION_TYPE);
+  // Goto slow case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &slow, not_taken);
 
   // Fast-case: Just invoke the function.
diff --git a/src/codegen-ia32.h b/src/codegen-ia32.h
index d79de37..dfa5978 100644
--- a/src/codegen-ia32.h
+++ b/src/codegen-ia32.h
@@ -29,6 +29,7 @@
 #define V8_CODEGEN_IA32_H_
 
 #include "scopes.h"
+#include "register-allocator.h"
 
 namespace v8 { namespace internal {
 
@@ -43,61 +44,6 @@
 
 
 // -------------------------------------------------------------------------
-// Virtual frame
-
-class VirtualFrame BASE_EMBEDDED {
- public:
-  explicit VirtualFrame(CodeGenerator* cgen);
-
-  void Enter();
-  void Exit();
-
-  void AllocateLocals();
-
-  Operand Top() const { return Operand(esp, 0); }
-
-  Operand Element(int index) const {
-    return Operand(esp, index * kPointerSize);
-  }
-
-  Operand Local(int index) const {
-    ASSERT(0 <= index && index < frame_local_count_);
-    return Operand(ebp, kLocal0Offset - index * kPointerSize);
-  }
-
-  Operand Function() const { return Operand(ebp, kFunctionOffset); }
-
-  Operand Context() const { return Operand(ebp, kContextOffset); }
-
-  Operand Parameter(int index) const {
-    ASSERT(-1 <= index && index < parameter_count_);
-    return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
-  }
-
-  Operand Receiver() const { return Parameter(-1); }
-
-  inline void Drop(int count);
-
-  inline void Pop();
-  inline void Pop(Register reg);
-  inline void Pop(Operand operand);
-
-  inline void Push(Register reg);
-  inline void Push(Operand operand);
-  inline void Push(Immediate immediate);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  MacroAssembler* masm_;
-  int frame_local_count_;
-  int parameter_count_;
-};
-
-
-// -------------------------------------------------------------------------
 // Reference support
 
 // A reference is a C++ stack-allocated object that keeps an ECMA
@@ -136,6 +82,16 @@
   // the expression stack, and it is left in place with its value above it.
   void GetValue(TypeofState typeof_state);
 
+  // Generate code to push the value of a reference on top of the expression
+  // stack and then spill the stack frame.  This function is used temporarily
+  // while the code generator is being transformed.
+  inline void GetValueAndSpill(TypeofState typeof_state);
+
+  // Like GetValue except that the slot is expected to be written to before
+  // being read from again.  The value of the reference may be invalidated,
+  // causing subsequent attempts to read it to fail.
+  void TakeValue(TypeofState typeof_state);
+
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
   // on the expression stack.  The stored value is left in place (with the
@@ -150,12 +106,141 @@
 
 
 // -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through.  The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally.  Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either label
+// to be bound or the non-fall through to be jumped to leaving an
+// invalid virtual frame.
+//
+// The labels in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+  ControlDestination(JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool true_is_fall_through)
+      : true_target_(true_target),
+        false_target_(false_target),
+        true_is_fall_through_(true_is_fall_through),
+        is_used_(false) {
+    ASSERT(true_is_fall_through ? !true_target->is_bound()
+                                : !false_target->is_bound());
+  }
+
+  // Accessors for the jump targets.  Directly jumping or branching to
+  // or binding the targets will not update the destination's state.
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+  // True if the destination has been jumped to unconditionally or
+  // control has been split to both targets.  This predicate does not
+  // test whether the targets have been extracted and manipulated as
+  // raw jump targets.
+  bool is_used() const { return is_used_; }
+
+  // True if the destination is used and the true target (respectively
+  // false target) was the fall through.  If the target is backward,
+  // "fall through" included jumping unconditionally to it.
+  bool true_was_fall_through() const {
+    return is_used_ && true_is_fall_through_;
+  }
+
+  bool false_was_fall_through() const {
+    return is_used_ && !true_is_fall_through_;
+  }
+
+  // Emit a branch to one of the true or false targets, and bind the
+  // other target.  Because this binds the fall-through target, it
+  // should be emitted in tail position (as the last thing when
+  // compiling an expression).
+  void Split(Condition cc) {
+    ASSERT(!is_used_);
+    if (true_is_fall_through_) {
+      false_target_->Branch(NegateCondition(cc));
+      true_target_->Bind();
+    } else {
+      true_target_->Branch(cc);
+      false_target_->Bind();
+    }
+    is_used_ = true;
+  }
+
+  // Emit an unconditional jump in tail position, to the true target
+  // (if the argument is true) or the false target.  The "jump" will
+  // actually bind the jump target if it is forward, jump to it if it
+  // is backward.
+  void Goto(bool where) {
+    ASSERT(!is_used_);
+    JumpTarget* target = where ? true_target_ : false_target_;
+    if (target->is_bound()) {
+      target->Jump();
+    } else {
+      target->Bind();
+    }
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Mark this jump target as used as if Goto had been called, but
+  // without generating a jump or binding a label (the control effect
+  // should have already happened).  This is used when the left
+  // subexpression of the short-circuit boolean operators are
+  // compiled.
+  void Use(bool where) {
+    ASSERT(!is_used_);
+    ASSERT((where ? true_target_ : false_target_)->is_bound());
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Swap the true and false targets but keep the same actual label as
+  // the fall through.  This is used when compiling negated
+  // expressions, where we want to swap the targets but preserve the
+  // state.
+  void Invert() {
+    JumpTarget* temp_target = true_target_;
+    true_target_ = false_target_;
+    false_target_ = temp_target;
+
+    true_is_fall_through_ = !true_is_fall_through_;
+  }
+
+ private:
+  // True and false jump targets.
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // Before using the destination: true if the true target is the
+  // preferred fall through, false if the false target is.  After
+  // using the destination: true if the true target was actually used
+  // as the fall through, false if the false target was.
+  bool true_is_fall_through_;
+
+  // True if the Split or Goto functions have been called.
+  bool is_used_;
+};
+
+
+// -------------------------------------------------------------------------
 // Code generation state
 
 // The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
+// the form of the state of the jump target pair).  It is threaded through
+// the call stack.  Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
 
 class CodeGenState BASE_EMBEDDED {
  public:
@@ -164,26 +249,34 @@
   explicit CodeGenState(CodeGenerator* owner);
 
   // Create a code generator state based on a code generator's current
-  // state.  The new state has its own access type and pair of branch
-  // labels, and no reference.
+  // state.  The new state may or may not be inside a typeof, and has its
+  // own control destination.
   CodeGenState(CodeGenerator* owner,
                TypeofState typeof_state,
-               Label* true_target,
-               Label* false_target);
+               ControlDestination* destination);
 
   // Destroy a code generator state and restore the owning code generator's
   // previous state.
   ~CodeGenState();
 
+  // Accessors for the state.
   TypeofState typeof_state() const { return typeof_state_; }
-  Label* true_target() const { return true_target_; }
-  Label* false_target() const { return false_target_; }
+  ControlDestination* destination() const { return destination_; }
 
  private:
+  // The owning code generator.
   CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
   TypeofState typeof_state_;
-  Label* true_target_;
-  Label* false_target_;
+
+  // A control destination in case the expression has a control-flow
+  // effect.
+  ControlDestination* destination_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
   CodeGenState* previous_;
 };
 
@@ -219,11 +312,26 @@
 
   VirtualFrame* frame() const { return frame_; }
 
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
   CodeGenState* state() { return state_; }
   void set_state(CodeGenState* state) { state_ = state; }
 
   void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
 
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
  private:
   // Construction/Destruction
   CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -232,15 +340,15 @@
   // Accessors
   Scope* scope() const { return scope_; }
 
+  // Clearing and generating deferred code.
+  void ClearDeferred();
   void ProcessDeferred();
 
   bool is_eval() { return is_eval_; }
 
   // State
-  bool has_cc() const  { return cc_reg_ >= 0; }
   TypeofState typeof_state() const { return state_->typeof_state(); }
-  Label* true_target() const  { return state_->true_target(); }
-  Label* false_target() const  { return state_->false_target(); }
+  ControlDestination* destination() const { return state_->destination(); }
 
   // Track loop nesting level.
   int loop_nesting() const { return loop_nesting_; }
@@ -249,14 +357,48 @@
 
 
   // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
 #define DEF_VISIT(type) \
   void Visit##type(type* node);
   NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    Visit(statement);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    VisitStatements(statements);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
+
   // Main code generation function
   void GenCode(FunctionLiteral* fun);
 
+  // Generate the return sequence code.  Should be called no more than once
+  // per compiled function (it binds the return target, which can not be
+  // done more than once).  The return value is assumed to be in eax by the
+  // code generated.
+  void GenerateReturnSequence();
+
   // The following are used by class Reference.
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
@@ -268,8 +410,8 @@
   Operand SlotOperand(Slot* slot, Register tmp);
 
   Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Register tmp,
-                                            Label* slow);
+                                            Result tmp,
+                                            JumpTarget* slow);
 
   // Expressions
   Operand GlobalObject() const {
@@ -278,19 +420,49 @@
 
   void LoadCondition(Expression* x,
                      TypeofState typeof_state,
-                     Label* true_target,
-                     Label* false_target,
-                     bool force_cc);
+                     ControlDestination* destination,
+                     bool force_control);
   void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
   void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
+  void LoadGlobalReceiver();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    Load(expression, typeof_state);
+    frame_->SpillAll();
+    set_in_spilled_code(true);
+  }
+
+  // Call LoadCondition and then spill the virtual frame unless control flow
+  // cannot reach the end of the expression (ie, by emitting only
+  // unconditional jumps to the control targets).
+  void LoadConditionAndSpill(Expression* expression,
+                             TypeofState typeof_state,
+                             ControlDestination* destination,
+                             bool force_control) {
+    ASSERT(in_spilled_code());
+    set_in_spilled_code(false);
+    LoadCondition(expression, typeof_state, destination, force_control);
+    if (frame_ != NULL) {
+      frame_->SpillAll();
+    }
+    set_in_spilled_code(true);
+  }
 
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         Register tmp,
-                                         Label* slow);
+  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                           TypeofState typeof_state,
+                                           JumpTarget* slow);
+
+  // Store the value on top of the expression stack into a slot, leaving the
+  // value in place.
+  void StoreToSlot(Slot* slot, InitState init_state);
 
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
@@ -300,19 +472,28 @@
   // through the context chain.
   void LoadTypeofExpression(Expression* x);
 
-  void ToBoolean(Label* true_target, Label* false_target);
+  // Translate the value on top of the frame into control flow to the
+  // control destination.
+  void ToBoolean(ControlDestination* destination);
 
   void GenericBinaryOperation(Token::Value op,
       StaticType* type,
       const OverwriteMode overwrite_mode = NO_OVERWRITE);
 
-  void Comparison(Condition cc, bool strict = false);
+  void Comparison(Condition cc,
+                  bool strict,
+                  ControlDestination* destination);
 
-  // Inline small integer literals. To prevent long attacker-controlled byte
-  // sequences, we only inline small Smis.
+  // To prevent long attacker-controlled byte sequences, integer constants
+  // from the JavaScript source are loaded in two parts if they are larger
+  // than 16 bits.
   static const int kMaxSmiInlinedBits = 16;
+  bool IsUnsafeSmi(Handle<Object> value);
+  // Load an integer constant x into a register target using
+  // at most 16 bits of user-controlled data per assembly operation.
+  void LoadUnsafeSmi(Register target, Handle<Object> value);
+
   bool IsInlineSmi(Literal* literal);
-  void SmiComparison(Condition cc,  Handle<Object> value, bool strict = false);
   void SmiOperation(Token::Value op,
                     StaticType* type,
                     Handle<Object> value,
@@ -321,10 +502,7 @@
 
   void CallWithArguments(ZoneList<Expression*>* arguments, int position);
 
-  // Control flow
-  void Branch(bool if_true, Label* L);
   void CheckStack();
-  void CleanStack(int num_bytes);
 
   bool CheckForInlineRuntimeCall(CallRuntime* node);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
@@ -400,7 +578,8 @@
   // Generate the code for cases for the fast case switch.
   // Called by GenerateFastCaseSwitchJumpTable.
   void GenerateFastCaseSwitchCases(SwitchStatement* node,
-                                   Vector<Label> case_labels);
+                                   Vector<Label> case_labels,
+                                   VirtualFrame* start_frame);
 
   // Fast support for constant-Smi switches.
   void GenerateFastCaseSwitchStatement(SwitchStatement* node,
@@ -416,10 +595,18 @@
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
-  void CodeForStatement(Node* node);
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Node* node);
   void CodeForSourcePosition(int pos);
 
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.  There should be
+  // no frame-external references to eax, ebx, ecx, edx, or edi.
+  bool HasValidEntryRegisters();
+#endif
+
   bool is_eval_;  // Tells whether code is generated for eval.
+
   Handle<Script> script_;
   List<DeferredCode*> deferred_;
 
@@ -429,22 +616,43 @@
   // Code generation state
   Scope* scope_;
   VirtualFrame* frame_;
-  Condition cc_reg_;
+  RegisterAllocator* allocator_;
   CodeGenState* state_;
-  bool is_inside_try_;
-  int break_stack_height_;
   int loop_nesting_;
 
-  // Labels
-  Label function_return_;
+  // Jump targets.
+  // The target of the return from the function.
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
 
   friend class VirtualFrame;
+  friend class JumpTarget;
   friend class Reference;
+  friend class Result;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+  ASSERT(cgen_->in_spilled_code());
+  cgen_->set_in_spilled_code(false);
+  GetValue(typeof_state);
+  cgen_->frame()->SpillAll();
+  cgen_->set_in_spilled_code(true);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_IA32_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index 271f571..558e854 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -38,8 +38,10 @@
 namespace v8 { namespace internal {
 
 DeferredCode::DeferredCode(CodeGenerator* generator)
-  : masm_(generator->masm()),
-    generator_(generator),
+  : generator_(generator),
+    masm_(generator->masm()),
+    enter_(generator),
+    exit_(generator, JumpTarget::BIDIRECTIONAL),
     statement_position_(masm_->current_statement_position()),
     position_(masm_->current_position()) {
   generator->AddDeferred(this);
@@ -51,6 +53,13 @@
 }
 
 
+void CodeGenerator::ClearDeferred() {
+  for (int i = 0; i < deferred_.length(); i++) {
+    deferred_[i]->Clear();
+  }
+}
+
+
 void CodeGenerator::ProcessDeferred() {
   while (!deferred_.is_empty()) {
     DeferredCode* code = deferred_.RemoveLast();
@@ -60,13 +69,40 @@
     if (code->position() != RelocInfo::kNoPosition) {
       masm->RecordPosition(code->position());
     }
-    // Bind labels and generate the code.
-    masm->bind(code->enter());
+    // Generate the code.
     Comment cmnt(masm, code->comment());
     code->Generate();
-    if (code->exit()->is_bound()) {
-      masm->jmp(code->exit());  // platform independent?
-    }
+    ASSERT(code->enter()->is_bound());
+    code->Clear();
+  }
+}
+
+
+void CodeGenerator::SetFrame(VirtualFrame* new_frame,
+                             RegisterFile* non_frame_registers) {
+  RegisterFile saved_counts;
+  if (has_valid_frame()) {
+    frame_->DetachFromCodeGenerator();
+    // The remaining register reference counts are the non-frame ones.
+    allocator_->SaveTo(&saved_counts);
+  }
+
+  if (new_frame != NULL) {
+    // Restore the non-frame register references that go with the new frame.
+    allocator_->RestoreFrom(non_frame_registers);
+    new_frame->AttachToCodeGenerator();
+  }
+
+  frame_ = new_frame;
+  saved_counts.CopyTo(non_frame_registers);
+}
+
+
+void CodeGenerator::DeleteFrame() {
+  if (has_valid_frame()) {
+    frame_->DetachFromCodeGenerator();
+    delete frame_;
+    frame_ = NULL;
   }
 }
 
@@ -122,9 +158,6 @@
     return Handle<Code>::null();
   }
 
-  // Process any deferred code.
-  cgen.ProcessDeferred();
-
   // Allocate and install the code.
   CodeDesc desc;
   cgen.masm()->GetCode(&desc);
@@ -154,7 +187,7 @@
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble();
+    code->Disassemble(*flit->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -212,6 +245,12 @@
 
 
 Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
+#ifdef DEBUG
+  // We should not try to compile the same function literal more than
+  // once.
+  node->mark_as_compiled();
+#endif
+
   // Determine if the function can be lazily compiled. This is
   // necessary to allow some of our builtin JS files to be lazily
   // compiled. These builtins cannot be handled lazily by the parser,
@@ -386,14 +425,14 @@
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
 
-  // Label pointer per number in range
+  // Label pointer per number in range.
   SmartPointer<Label*> case_targets(NewArray<Label*>(range));
 
-  // Label per switch case
+  // Label per switch case.
   SmartPointer<Label> case_labels(NewArray<Label>(length));
 
-  Label* fail_label = default_index >= 0 ? &(case_labels[default_index])
-                                         : node->break_target();
+  Label* fail_label =
+      default_index >= 0 ? &(case_labels[default_index]) : NULL;
 
   // Populate array of label pointers for each number in the range.
   // Initally put the failure label everywhere.
@@ -404,7 +443,7 @@
   // Overwrite with label of a case for the number value of that case.
   // (In reverse order, so that if the same label occurs twice, the
   // first one wins).
-  for (int i = length-1; i >= 0 ; i--) {
+  for (int i = length - 1; i >= 0 ; i--) {
     CaseClause* clause = cases->at(i);
     if (!clause->is_default()) {
       Object* label_value = *(clause->label()->AsLiteral()->handle());
@@ -424,21 +463,36 @@
 
 void CodeGenerator::GenerateFastCaseSwitchCases(
     SwitchStatement* node,
-    Vector<Label> case_labels) {
+    Vector<Label> case_labels,
+    VirtualFrame* start_frame) {
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
 
   for (int i = 0; i < length; i++) {
     Comment cmnt(masm(), "[ Case clause");
-    masm()->bind(&(case_labels[i]));
+
+    // We may not have a virtual frame if control flow did not fall
+    // off the end of the previous case.  In that case, use the start
+    // frame.  Otherwise, we have to merge the existing one to the
+    // start frame as part of the previous case.
+    if (!has_valid_frame()) {
+      RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+      SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
+    } else {
+      frame_->MergeTo(start_frame);
+    }
+    masm()->bind(&case_labels[i]);
     VisitStatements(cases->at(i)->statements());
   }
-
-  masm()->bind(node->break_target());
 }
 
 
 bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
+  // TODO(238): Due to issue 238, fast case switches can crash on ARM
+  // and possibly IA32.  They are disabled for now.
+  // See http://code.google.com/p/v8/issues/detail?id=238
+  return false;
+
   ZoneList<CaseClause*>* cases = node->cases();
   int length = cases->length();
 
@@ -454,9 +508,10 @@
     CaseClause* clause = cases->at(i);
     if (clause->is_default()) {
       if (default_index >= 0) {
-        return false;  // More than one default label:
-                       // Defer to normal case for error.
-    }
+        // There is more than one default label. Defer to the normal case
+        // for error.
+        return false;
+      }
       default_index = i;
     } else {
       Expression* label = clause->label();
@@ -468,9 +523,9 @@
       if (!value->IsSmi()) {
         return false;
       }
-      int smi = Smi::cast(value)->value();
-      if (smi < min_index) { min_index = smi; }
-      if (smi > max_index) { max_index = smi; }
+      int int_value = Smi::cast(value)->value();
+      min_index = Min(int_value, min_index);
+      max_index = Max(int_value, max_index);
     }
   }
 
@@ -486,7 +541,18 @@
 }
 
 
-void CodeGenerator::CodeForStatement(Node* node) {
+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    int pos = fun->start_position();
+    if (pos != RelocInfo::kNoPosition) {
+      masm()->RecordStatementPosition(pos);
+      masm()->RecordPosition(pos);
+    }
+  }
+}
+
+
+void CodeGenerator::CodeForStatementPosition(Node* node) {
   if (FLAG_debug_info) {
     int pos = node->statement_pos();
     if (pos != RelocInfo::kNoPosition) {
diff --git a/src/codegen.h b/src/codegen.h
index 71fe660..3086638 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -37,14 +37,22 @@
 // of Visitor and that the following methods are available publicly:
 // CodeGenerator::MakeCode
 // CodeGenerator::SetFunctionInfo
-// CodeGenerator::AddDeferred
 // CodeGenerator::masm
+// CodeGenerator::frame
+// CodeGenerator::has_valid_frame
+// CodeGenerator::SetFrame
+// CodeGenerator::DeleteFrame
+// CodeGenerator::allocator
+// CodeGenerator::AddDeferred
+// CodeGenerator::in_spilled_code
+// CodeGenerator::set_in_spilled_code
 //
 // These methods are either used privately by the shared code or implemented as
 // shared code:
 // CodeGenerator::CodeGenerator
 // CodeGenerator::~CodeGenerator
 // CodeGenerator::ProcessDeferred
+// CodeGenerator::ClearDeferred
 // CodeGenerator::GenCode
 // CodeGenerator::BuildBoilerplate
 // CodeGenerator::ComputeCallInitialize
@@ -85,11 +93,23 @@
 
   virtual void Generate() = 0;
 
+  // Unuse the entry and exit targets, deallocating all virtual frames
+  // held by them.  It will be impossible to emit a (correct) jump
+  // into or out of the deferred code after clearing.
+  void Clear() {
+    enter_.Unuse();
+    exit_.Unuse();
+  }
+
   MacroAssembler* masm() const { return masm_; }
   CodeGenerator* generator() const { return generator_; }
 
-  Label* enter() { return &enter_; }
-  Label* exit() { return &exit_; }
+  JumpTarget* enter() { return &enter_; }
+  void BindExit() { exit_.Bind(0); }
+  void BindExit(Result* result) { exit_.Bind(result, 1); }
+  void BindExit(Result* result0, Result* result1, Result* result2) {
+    exit_.Bind(result0, result1, result2, 3);
+  }
 
   int statement_position() const { return statement_position_; }
   int position() const { return position_; }
@@ -103,15 +123,12 @@
 #endif
 
  protected:
-  // The masm_ field is manipulated when compiling stubs with the
-  // BEGIN_STUB and END_STUB macros. For that reason, it cannot be
-  // constant.
-  MacroAssembler* masm_;
+  CodeGenerator* const generator_;
+  MacroAssembler* const masm_;
+  JumpTarget enter_;
+  JumpTarget exit_;
 
  private:
-  CodeGenerator* const generator_;
-  Label enter_;
-  Label exit_;
   int statement_position_;
   int position_;
 #ifdef DEBUG
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index d5a0048..3775bc2 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -60,48 +60,6 @@
 }
 
 
-// We only re-use a cached function for some script source code if the
-// script originates from the same places. This is to avoid issues
-// when reporting errors, etc.
-static bool HasOrigin(Handle<JSFunction> boilerplate,
-                      Handle<Object> name,
-                      int line_offset,
-                      int column_offset) {
-  Handle<Script> script =
-      Handle<Script>(Script::cast(boilerplate->shared()->script()));
-  // If the script name isn't set, the boilerplate script should have
-  // an undefined name to have the same origin.
-  if (name.is_null()) {
-    return script->name()->IsUndefined();
-  }
-  // Do the fast bailout checks first.
-  if (line_offset != script->line_offset()->value()) return false;
-  if (column_offset != script->column_offset()->value()) return false;
-  // Check that both names are strings. If not, no match.
-  if (!name->IsString() || !script->name()->IsString()) return false;
-  // Compare the two name strings for equality.
-  return String::cast(*name)->Equals(String::cast(script->name()));
-}
-
-
-static Handle<JSFunction> Lookup(Handle<String> source,
-                                 CompilationCache::Entry entry) {
-  // Make sure not to leak the table into the surrounding handle
-  // scope. Otherwise, we risk keeping old tables around even after
-  // having cleared the cache.
-  Object* result;
-  { HandleScope scope;
-    Handle<CompilationCacheTable> table = GetTable(entry);
-    result = table->Lookup(*source);
-  }
-  if (result->IsJSFunction()) {
-    return Handle<JSFunction>(JSFunction::cast(result));
-  } else {
-    return Handle<JSFunction>::null();
-  }
-}
-
-
 static Handle<JSFunction> Lookup(Handle<String> source,
                                  Handle<Context> context,
                                  CompilationCache::Entry entry) {
@@ -121,20 +79,32 @@
 }
 
 
+static Handle<FixedArray> Lookup(Handle<String> source,
+                                 JSRegExp::Flags flags) {
+  // Make sure not to leak the table into the surrounding handle
+  // scope. Otherwise, we risk keeping old tables around even after
+  // having cleared the cache.
+  Object* result;
+  { HandleScope scope;
+    Handle<CompilationCacheTable> table = GetTable(CompilationCache::REGEXP);
+    result = table->LookupRegExp(*source, flags);
+  }
+  if (result->IsFixedArray()) {
+    return Handle<FixedArray>(FixedArray::cast(result));
+  } else {
+    return Handle<FixedArray>::null();
+  }
+}
+
+
 Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
                                                   Handle<Object> name,
                                                   int line_offset,
                                                   int column_offset) {
-  Handle<JSFunction> result = Lookup(source, SCRIPT);
-  if (result.is_null()) {
-    Counters::compilation_cache_misses.Increment();
-  } else if (HasOrigin(result, name, line_offset, column_offset)) {
-    Counters::compilation_cache_hits.Increment();
-  } else {
-    result = Handle<JSFunction>::null();
-    Counters::compilation_cache_misses.Increment();
-  }
-  return result;
+  // TODO(245): Start caching scripts again but make it local to a
+  // global context to avoid sharing code between independent
+  // environments.
+  return Handle<JSFunction>::null();
 }
 
 
@@ -152,20 +122,31 @@
 }
 
 
-void CompilationCache::PutFunction(Handle<String> source,
-                                   Entry entry,
-                                   Handle<JSFunction> boilerplate) {
-  HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  Handle<CompilationCacheTable> table = GetTable(entry);
-  CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
+                                                  JSRegExp::Flags flags) {
+  Handle<FixedArray> result = Lookup(source, flags);
+  if (result.is_null()) {
+    Counters::compilation_cache_misses.Increment();
+  } else {
+    Counters::compilation_cache_hits.Increment();
+  }
+  return result;
 }
 
 
-void CompilationCache::PutEvalFunction(Handle<String> source,
-                                       Handle<Context> context,
-                                       Entry entry,
-                                       Handle<JSFunction> boilerplate) {
+void CompilationCache::PutScript(Handle<String> source,
+                                 Entry entry,
+                                 Handle<JSFunction> boilerplate) {
+  // TODO(245): Start caching scripts again but make it local to a
+  // global context to avoid sharing code between independent
+  // environments.
+}
+
+
+void CompilationCache::PutEval(Handle<String> source,
+                               Handle<Context> context,
+                               Entry entry,
+                               Handle<JSFunction> boilerplate) {
   HandleScope scope;
   ASSERT(boilerplate->IsBoilerplate());
   Handle<CompilationCacheTable> table = GetTable(entry);
@@ -173,19 +154,6 @@
 }
 
 
-Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
-                                                  JSRegExp::Flags flags) {
-  Handle<CompilationCacheTable> table = GetTable(REGEXP);
-  Object* result = table->LookupRegExp(*source, flags);
-  if (result->IsFixedArray()) {
-    Counters::regexp_cache_hits.Increment();
-    return Handle<FixedArray>(FixedArray::cast(result));
-  } else {
-    Counters::regexp_cache_misses.Increment();
-    return Handle<FixedArray>();
-  }
-}
-
 
 void CompilationCache::PutRegExp(Handle<String> source,
                                  JSRegExp::Flags flags,
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 3fc65c2..045a6f8 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -67,26 +67,25 @@
   static Handle<FixedArray> LookupRegExp(Handle<String> source,
                                          JSRegExp::Flags flags);
 
+  // Associate the (source, kind) pair to the boilerplate. This may
+  // overwrite an existing mapping.
+  static void PutScript(Handle<String> source,
+                        Entry entry,
+                        Handle<JSFunction> boilerplate);
+
+  // Associate the (source, context->closure()->shared(), kind) triple
+  // with the boilerplate. This may overwrite an existing mapping.
+  static void PutEval(Handle<String> source,
+                      Handle<Context> context,
+                      Entry entry,
+                      Handle<JSFunction> boilerplate);
+
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
   static void PutRegExp(Handle<String> source,
                         JSRegExp::Flags flags,
                         Handle<FixedArray> data);
 
-  // Associate the (source, kind) pair to the boilerplate. This may
-  // overwrite an existing mapping.
-  static void PutFunction(Handle<String> source,
-                          Entry entry,
-                          Handle<JSFunction> boilerplate);
-
-  // Associate the (source, context->closure()->shared(), kind)
-  // triple with the boilerplate. This may overwrite an existing
-  // mapping.
-  static void PutEvalFunction(Handle<String> source,
-                              Handle<Context> context,
-                              Entry entry,
-                              Handle<JSFunction> boilerplate);
-
   // Clear the cache - also used to initialize the cache at startup.
   static void Clear();
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 896e9d4..6450885 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -35,6 +35,7 @@
 #include "scopes.h"
 #include "rewriter.h"
 #include "usage-analyzer.h"
+#include "oprofile-agent.h"
 
 namespace v8 { namespace internal {
 
@@ -123,16 +124,20 @@
     return Handle<JSFunction>::null();
   }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
   // Log the code generation for the script. Check explicit whether logging is
   // to avoid allocating when not required.
-  if (Logger::is_enabled()) {
+  if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
     if (script->name()->IsString()) {
       SmartPointer<char> data =
           String::cast(script->name())->ToCString(DISALLOW_NULLS);
       LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, *data));
+      OProfileAgent::CreateNativeCodeRegion(*data, code->address(),
+                                            code->ExecutableSize());
     } else {
       LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, ""));
+      OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+          code->address(), code->ExecutableSize());
     }
   }
 #endif
@@ -210,7 +215,7 @@
                           extension,
                           pre_data);
     if (extension == NULL && !result.is_null()) {
-      CompilationCache::PutFunction(source, CompilationCache::SCRIPT, result);
+      CompilationCache::PutScript(source, CompilationCache::SCRIPT, result);
     }
 
     // Get rid of the pre-parsing data (if necessary).
@@ -220,7 +225,6 @@
   }
 
   if (result.is_null()) Top::ReportPendingMessages();
-
   return result;
 }
 
@@ -249,7 +253,7 @@
     script->set_line_offset(Smi::FromInt(line_offset));
     result = MakeFunction(is_global, true, script, context, NULL, NULL);
     if (!result.is_null()) {
-      CompilationCache::PutEvalFunction(source, context, entry, result);
+      CompilationCache::PutEval(source, context, entry, result);
     }
   }
 
@@ -307,20 +311,26 @@
     return false;
   }
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
   // Log the code generation. If source information is available include script
   // name and line number. Check explicit whether logging is enabled as finding
   // the line number is not for free.
-  if (Logger::is_enabled()) {
+  if (Logger::is_enabled() || OProfileAgent::is_enabled()) {
     if (script->name()->IsString()) {
-      int line_num = script->GetLineNumber(start_position);
+      int line_num = GetScriptLineNumber(script, start_position);
       if (line_num > 0) {
         line_num += script->line_offset()->value() + 1;
       }
       LOG(CodeCreateEvent("LazyCompile", *code, *lit->name(),
                           String::cast(script->name()), line_num));
+      OProfileAgent::CreateNativeCodeRegion(*lit->name(),
+                                            String::cast(script->name()),
+                                            line_num, code->address(),
+                                            code->ExecutableSize());
     } else {
       LOG(CodeCreateEvent("LazyCompile", *code, *lit->name()));
+      OProfileAgent::CreateNativeCodeRegion(*lit->name(), code->address(),
+                                            code->ExecutableSize());
     }
   }
 #endif
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index cfe69ca..5b21816 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -28,6 +28,8 @@
 
 #include "d8.h"
 #include "d8-debug.h"
+#include "platform.h"
+#include "debug-agent.h"
 
 
 namespace v8 {
@@ -57,13 +59,17 @@
   }
 
   // Print the event details.
-  Handle<String> details =
-      Shell::DebugEventToText(Handle<String>::Cast(event_json));
-  if (details->Length() == 0) {
+  Handle<Object> details =
+      Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+  String::Utf8Value str(details->Get(String::New("text")));
+  if (str.length() == 0) {
     // Empty string is used to signal not to process this event.
     return;
   }
-  String::Utf8Value str(details);
   printf("%s\n", *str);
 
   // Get the debug command processor.
@@ -123,7 +129,7 @@
     Handle<String> response = Handle<String>::Cast(response_val);
 
     // Convert the debugger response into text details and the running state.
-    Handle<Object> response_details = Shell::DebugResponseDetails(response);
+    Handle<Object> response_details = Shell::DebugMessageDetails(response);
     if (try_catch.HasCaught()) {
       Shell::ReportException(&try_catch);
       continue;
@@ -138,4 +144,197 @@
 }
 
 
+void RunRemoteDebugger(int port) {
+  RemoteDebugger debugger(port);
+  debugger.Run();
+}
+
+
+void RemoteDebugger::Run() {
+  bool ok;
+
+  // Make sure that socket support is initialized.
+  ok = i::Socket::Setup();
+  if (!ok) {
+    printf("Unable to initialize socket support %d\n", i::Socket::LastError());
+    return;
+  }
+
+  // Connect to the debugger agent.
+  conn_ = i::OS::CreateSocket();
+  static const int kPortStrSize = 6;
+  char port_str[kPortStrSize];
+  i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
+  ok = conn_->Connect("localhost", port_str);
+  if (!ok) {
+    printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
+    return;
+  }
+
+  // Start the receiver thread.
+  ReceiverThread receiver(this);
+  receiver.Start();
+
+  // Start the keyboard thread.
+  KeyboardThread keyboard(this);
+  keyboard.Start();
+
+  // Process events received from debugged VM and from the keyboard.
+  bool terminate = false;
+  while (!terminate) {
+    event_available_->Wait();
+    RemoteDebuggerEvent* event = GetEvent();
+    switch (event->type()) {
+      case RemoteDebuggerEvent::kMessage:
+        HandleMessageReceived(event->data());
+        break;
+      case RemoteDebuggerEvent::kKeyboard:
+        HandleKeyboardCommand(event->data());
+        break;
+      case RemoteDebuggerEvent::kDisconnect:
+        terminate = true;
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+    delete event;
+  }
+
+  // Wait for the receiver thread to end.
+  receiver.Join();
+}
+
+
+void RemoteDebugger::MessageReceived(i::SmartPointer<char> message) {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kMessage, message);
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::KeyboardCommand(i::SmartPointer<char> command) {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kKeyboard, command);
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::ConnectionClosed() {
+  RemoteDebuggerEvent* event =
+      new RemoteDebuggerEvent(RemoteDebuggerEvent::kDisconnect,
+                              i::SmartPointer<char>());
+  AddEvent(event);
+}
+
+
+void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
+  i::ScopedLock lock(event_access_);
+  if (head_ == NULL) {
+    ASSERT(tail_ == NULL);
+    head_ = event;
+    tail_ = event;
+  } else {
+    ASSERT(tail_ != NULL);
+    tail_->set_next(event);
+    tail_ = event;
+  }
+  event_available_->Signal();
+}
+
+
+RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
+  i::ScopedLock lock(event_access_);
+  ASSERT(head_ != NULL);
+  RemoteDebuggerEvent* result = head_;
+  head_ = head_->next();
+  if (head_ == NULL) {
+    ASSERT(tail_ == result);
+    tail_ = NULL;
+  }
+  return result;
+}
+
+
+void RemoteDebugger::HandleMessageReceived(char* message) {
+  HandleScope scope;
+
+  // Print the event details.
+  TryCatch try_catch;
+  Handle<Object> details =
+      Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
+  if (try_catch.HasCaught()) {
+      Shell::ReportException(&try_catch);
+    return;
+  }
+  String::Utf8Value str(details->Get(String::New("text")));
+  if (str.length() == 0) {
+    // Empty string is used to signal not to process this event.
+    return;
+  }
+  if (*str != NULL) {
+    printf("%s\n", *str);
+  } else {
+    printf("???\n");
+  }
+  printf("dbg> ");
+}
+
+
+void RemoteDebugger::HandleKeyboardCommand(char* command) {
+  HandleScope scope;
+
+  // Convert the debugger command to a JSON debugger request.
+  TryCatch try_catch;
+  Handle<Value> request =
+      Shell::DebugCommandToJSONRequest(String::New(command));
+  if (try_catch.HasCaught()) {
+    Shell::ReportException(&try_catch);
+    return;
+  }
+
+  // If undefined is returned the command was handled internally and there is
+  // no JSON to send.
+  if (request->IsUndefined()) {
+    return;
+  }
+
+  // Send the JSON debugger request.
+  i::DebuggerAgentUtil::SendMessage(conn_, Handle<String>::Cast(request));
+}
+
+
+void ReceiverThread::Run() {
+  while (true) {
+    // Receive a message.
+    i::SmartPointer<char> message =
+      i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
+    if (*message == NULL) {
+      remote_debugger_->ConnectionClosed();
+      return;
+    }
+
+    // Pass the message to the main thread.
+    remote_debugger_->MessageReceived(message);
+  }
+}
+
+
+void KeyboardThread::Run() {
+  static const int kBufferSize = 256;
+  while (true) {
+    // Read keyboard input.
+    char command[kBufferSize];
+    char* str = fgets(command, kBufferSize, stdin);
+    if (str == NULL) {
+      break;
+    }
+
+    // Pass the keyboard command to the main thread.
+    remote_debugger_->KeyboardCommand(
+        i::SmartPointer<char>(i::OS::StrDup(command)));
+  }
+}
+
+
 }  // namespace v8
diff --git a/src/d8-debug.h b/src/d8-debug.h
index e6d66be..c7acc2f 100644
--- a/src/d8-debug.h
+++ b/src/d8-debug.h
@@ -41,6 +41,113 @@
                       Handle<Object> event_data,
                       Handle<Value> data);
 
+// Start the remote debugger connecting to a V8 debugger agent on the specified
+// port.
+void RunRemoteDebugger(int port);
+
+// Forward declarations.
+class RemoteDebuggerEvent;
+class ReceiverThread;
+
+
+// Remote debugging class.
+class RemoteDebugger {
+ public:
+  explicit RemoteDebugger(int port)
+      : port_(port),
+        event_access_(i::OS::CreateMutex()),
+        event_available_(i::OS::CreateSemaphore(0)),
+        head_(NULL), tail_(NULL) {}
+  void Run();
+
+  // Handle events from the subordinate threads.
+  void MessageReceived(i::SmartPointer<char> message);
+  void KeyboardCommand(i::SmartPointer<char> command);
+  void ConnectionClosed();
+
+ private:
+  // Add new debugger event to the list.
+  void AddEvent(RemoteDebuggerEvent* event);
+  // Read next debugger event from the list.
+  RemoteDebuggerEvent* GetEvent();
+
+  // Handle a message from the debugged V8.
+  void HandleMessageReceived(char* message);
+  // Handle a keyboard command.
+  void HandleKeyboardCommand(char* command);
+
+  // Get connection to agent in debugged V8.
+  i::Socket* conn() { return conn_; }
+
+  int port_;  // Port used to connect to the debugged V8.
+  i::Socket* conn_;  // Connection to debugger agent in debugged V8.
+
+  // Linked list of events from debugged V8 and from keyboard input. Access to
+  // the list is guarded by a mutex and a semaphore signals new items in the
+  // list.
+  i::Mutex* event_access_;
+  i::Semaphore* event_available_;
+  RemoteDebuggerEvent* head_;
+  RemoteDebuggerEvent* tail_;
+
+  friend class ReceiverThread;
+};
+
+
+// Thread reading from debugged V8 instance.
+class ReceiverThread: public i::Thread {
+ public:
+  explicit ReceiverThread(RemoteDebugger* remote_debugger)
+      : remote_debugger_(remote_debugger) {}
+  ~ReceiverThread() {}
+
+  void Run();
+
+ private:
+  RemoteDebugger* remote_debugger_;
+};
+
+
+// Thread reading keyboard input.
+class KeyboardThread: public i::Thread {
+ public:
+  explicit KeyboardThread(RemoteDebugger* remote_debugger)
+      : remote_debugger_(remote_debugger) {}
+  ~KeyboardThread() {}
+
+  void Run();
+
+ private:
+  RemoteDebugger* remote_debugger_;
+};
+
+
+// Events processed by the main debugger thread.
+class RemoteDebuggerEvent {
+ public:
+  RemoteDebuggerEvent(int type, i::SmartPointer<char> data)
+      : type_(type), data_(data), next_(NULL) {
+    ASSERT(type == kMessage || type == kKeyboard || type == kDisconnect);
+  }
+
+  static const int kMessage = 1;
+  static const int kKeyboard = 2;
+  static const int kDisconnect = 3;
+
+  int type() { return type_; }
+  char* data() { return *data_; }
+
+ private:
+  void set_next(RemoteDebuggerEvent* event) { next_ = event; }
+  RemoteDebuggerEvent* next() { return next_; }
+
+  int type_;
+  i::SmartPointer<char> data_;
+  RemoteDebuggerEvent* next_;
+
+  friend class RemoteDebugger;
+};
+
 
 }  // namespace v8
 
diff --git a/src/d8.cc b/src/d8.cc
index a049430..5f5e81c 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -232,21 +232,14 @@
 }
 
 
-Handle<String> Shell::DebugEventToText(Handle<String> event) {
-  HandleScope handle_scope;
+Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
   Context::Scope context_scope(utility_context_);
   Handle<Object> global = utility_context_->Global();
-  Handle<Value> fun = global->Get(String::New("DebugEventToText"));
-  TryCatch try_catch;
-  try_catch.SetVerbose(true);
+  Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
   static const int kArgc = 1;
-  Handle<Value> argv[kArgc] = { event };
+  Handle<Value> argv[kArgc] = { message };
   Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
-  if (try_catch.HasCaught()) {
-    return handle_scope.Close(try_catch.Exception()->ToString());
-  } else {
-    return handle_scope.Close(Handle<String>::Cast(val));
-  }
+  return Handle<Object>::Cast(val);
 }
 
 
@@ -261,17 +254,6 @@
 }
 
 
-Handle<Object> Shell::DebugResponseDetails(Handle<String> response) {
-  Context::Scope context_scope(utility_context_);
-  Handle<Object> global = utility_context_->Global();
-  Handle<Value> fun = global->Get(String::New("DebugResponseDetails"));
-  static const int kArgc = 1;
-  Handle<Value> argv[kArgc] = { response };
-  Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
-  return Handle<Object>::Cast(val);
-}
-
-
 int32_t* Counter::Bind(const char* name) {
   int i;
   for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -369,7 +351,15 @@
                                       shell_source.length());
   Handle<String> name = String::New(shell_source_name.start(),
                                     shell_source_name.length());
-  Script::Compile(source, name)->Run();
+  Handle<Script> script = Script::Compile(source, name);
+  script->Run();
+
+  // Mark the d8 shell script as native to avoid it showing up as normal source
+  // in the debugger.
+  i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
+  i::Handle<i::Script> script_object =
+      i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
+  script_object->set_type(i::Smi::FromInt(i::SCRIPT_TYPE_NATIVE));
 
   // Create the evaluation context
   evaluation_context_ = Context::New(NULL, global_template);
@@ -591,8 +581,22 @@
           return 1;
       }
     }
-    if (i::FLAG_debugger)
+
+    // Run the remote debugger if requested.
+    if (i::FLAG_remote_debugger) {
+      RunRemoteDebugger(i::FLAG_debugger_port);
+      return 0;
+    }
+
+    // Start the debugger agent if requested.
+    if (i::FLAG_debugger_agent) {
+      v8::Debug::EnableAgent(i::FLAG_debugger_port);
+    }
+
+    // Start the in-process debugger if requested.
+    if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
       v8::Debug::SetDebugEventListener(HandleDebugEvent);
+    }
   }
   if (run_shell)
     RunShell();
diff --git a/src/d8.h b/src/d8.h
index 726b369..45bab92 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -88,9 +88,8 @@
   static int Main(int argc, char* argv[]);
   static Handle<Array> GetCompletions(Handle<String> text,
                                       Handle<String> full);
-  static Handle<String> DebugEventToText(Handle<String> event);
+  static Handle<Object> DebugMessageDetails(Handle<String> message);
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
-  static Handle<Object> DebugResponseDetails(Handle<String> response);
 
   static Handle<Value> Print(const Arguments& args);
   static Handle<Value> Quit(const Arguments& args);
diff --git a/src/d8.js b/src/d8.js
index 25896ba..04b90fe 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -102,85 +102,98 @@
 var trace_compile = false;  // Tracing all compile events?
 
 
-function DebugEventToText(event) {
+// Process a debugger JSON message into a display text and a running status.
+// This function returns an object with properties "text" and "running" holding
+// this information.
+function DebugMessageDetails(message) {
   // Convert the JSON string to an object.
-  var response = new ProtocolPackage(event);
+  var response = new ProtocolPackage(message);
 
-  // Build the text.
+  if (response.type() == 'event') {
+    return DebugEventDetails(response);
+  } else {
+    return DebugResponseDetails(response);
+  }
+}
+
+function DebugEventDetails(response) {
+  details = {text:'', running:false}
+
+  // Get the running state.
+  details.running = response.running();
+
   var body = response.body();
-  var details = '';
+  var result = '';
   switch (response.event()) {
     case 'break':
       if (body.breakpoints) {
-        details += 'breakpoint';
+        result += 'breakpoint';
         if (body.breakpoints.length > 1) {
-          details += 's';
+          result += 's';
         }
-        details += ' #';
+        result += ' #';
         for (var i = 0; i < body.breakpoints.length; i++) {
           if (i > 0) {
-            details += ', #';
+            result += ', #';
           }
-          details += body.breakpoints[i];
+          result += body.breakpoints[i];
         }
       } else {
-        details += 'break';
+        result += 'break';
       }
-      details += ' in ';
-      details += body.invocationText;
-      details += ', ';
-      details += SourceInfo(body);
-      details += '\n';
-      details += SourceUnderline(body.sourceLineText, body.sourceColumn);
+      result += ' in ';
+      result += body.invocationText;
+      result += ', ';
+      result += SourceInfo(body);
+      result += '\n';
+      result += SourceUnderline(body.sourceLineText, body.sourceColumn);
       Debug.State.currentSourceLine = body.sourceLine;
       Debug.State.currentFrame = 0;
-      return details;
+      details.text = result;
+      break;
       
     case 'exception':
       if (body.uncaught) {
-        details += 'Uncaught: ';
+        result += 'Uncaught: ';
       } else {
-        details += 'Exception: ';
+        result += 'Exception: ';
       }
-      details += '"';
-      details += body.exception.text;
-      details += '"';
+      result += '"';
+      result += body.exception.text;
+      result += '"';
       if (body.sourceLine >= 0) {
-        details += ', ';
-        details += SourceInfo(body);
-        details += '\n';
-        details += SourceUnderline(body.sourceLineText, body.sourceColumn);
+        result += ', ';
+        result += SourceInfo(body);
+        result += '\n';
+        result += SourceUnderline(body.sourceLineText, body.sourceColumn);
         Debug.State.currentSourceLine = body.sourceLine;
         Debug.State.currentFrame = 0;
       } else {
-        details += ' (empty stack)';
+        result += ' (empty stack)';
         Debug.State.currentSourceLine = -1;
         Debug.State.currentFrame = kNoFrame;
       }
-      return details;
+      details.text = result;
+      break;
 
-    case 'exception':
-      if (trace_compile) {
-        details = 'Source ' + body.script.name + ' compiled:\n'
-      } else {
-        return '';
-      }
-     
     case 'afterCompile':
       if (trace_compile) {
-        details = 'Source ' + event.script().name() + ' compiled:\n'
+        result = 'Source ' + body.script.name + ' compiled:\n'
         var source = body.script.source;
         if (!(source[source.length - 1] == '\n')) {
-          details += source;
+          result += source;
         } else {
-          details += source.substring(0, source.length - 1);
+          result += source.substring(0, source.length - 1);
         }
-        return details;
-      } else {
-        return '';
       }
+      details.text = result;
+      break;
+
+    default:
+      details.text = 'Unknown debug event ' + response.event();
   }
-  return 'Unknown debug event ' + response.event();
+
+  return details;
 };
 
 
@@ -749,13 +762,10 @@
 
 
 // Convert a JSON response to text for display in a text based debugger.
-function DebugResponseDetails(json_response) {
+function DebugResponseDetails(response) {
   details = {text:'', running:false}
 
   try {
-    // Convert the JSON string to an object.
-    var response = new ProtocolPackage(json_response);
-
     if (!response.success()) {
       details.text = response.message();
       return details;
@@ -878,18 +888,19 @@
         var result = '';
         for (i = 0; i < body.length; i++) {
           if (i != 0) result += '\n';
+          if (body[i].id) {
+            result += body[i].id;
+          } else {
+            result += '[no id]';
+          }
+          result += ', ';
           if (body[i].name) {
             result += body[i].name;
           } else {
             result += '[unnamed] ';
-            var sourceStart = body[i].sourceStart;
-            if (sourceStart.length > 40) {
-              sourceStart = sourceStart.substring(0, 37) + '...';
-            }
-            result += sourceStart;
           }
           result += ' (lines: ';
-          result += body[i].sourceLines;
+          result += body[i].lineCount;
           result += ', length: ';
           result += body[i].sourceLength;
           if (body[i].type == Debug.ScriptType.Native) {
@@ -897,7 +908,13 @@
           } else if (body[i].type == Debug.ScriptType.Extension) {
             result += ', extension';
           }
-          result += ')';
+          result += '), [';
+          var sourceStart = body[i].sourceStart;
+          if (sourceStart.length > 40) {
+            sourceStart = sourceStart.substring(0, 37) + '...';
+          }
+          result += sourceStart;
+          result += ']';
         }
         details.text = result;
         break;
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
new file mode 100644
index 0000000..1ab5aac
--- /dev/null
+++ b/src/debug-agent.cc
@@ -0,0 +1,311 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+#include "debug-agent.h"
+
+namespace v8 { namespace internal {
+
+
+// Public V8 debugger API message handler function. This function just delegates
+// to the debugger agent through its data parameter.
+void DebuggerAgentMessageHandler(const uint16_t* message, int length,
+                                 void *data) {
+  reinterpret_cast<DebuggerAgent*>(data)->DebuggerMessage(message, length);
+}
+
+
+// Debugger agent main thread.
+void DebuggerAgent::Run() {
+  // Create a server socket and bind it to the requested port.
+  server_ = OS::CreateSocket();
+  server_->Bind(port_);
+
+  while (!terminate_) {
+    // Listen for new connections.
+    server_->Listen(1);
+
+    // Accept the new connection.
+    Socket* client = server_->Accept();
+
+    // Create and start a new session.
+    CreateSession(client);
+  }
+}
+
+
+void DebuggerAgent::Shutdown() {
+  delete server_;
+}
+
+
+void DebuggerAgent::CreateSession(Socket* client) {
+  ScopedLock with(session_access_);
+
+  // If another session is already established terminate this one.
+  if (session_ != NULL) {
+    static const char* message = "Remote debugging session already active\n";
+
+    client->Send(message, strlen(message));
+    delete client;
+    return;
+  }
+
+  // Create a new session and hook up the debug message handler.
+  session_ = new DebuggerAgentSession(this, client);
+  v8::Debug::SetMessageHandler(DebuggerAgentMessageHandler, this);
+  session_->Start();
+}
+
+
+void DebuggerAgent::DebuggerMessage(const uint16_t* message, int length) {
+  ScopedLock with(session_access_);
+
+  // Forward the message handling to the session.
+  if (session_ != NULL) {
+    session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(message),
+                              length));
+  }
+}
+
+
+void DebuggerAgent::SessionClosed(DebuggerAgentSession* session) {
+  ScopedLock with(session_access_);
+
+  // Terminate the session.
+  ASSERT(session == session_);
+  if (session == session_) {
+    session->Join();
+    delete session;
+    session_ = NULL;
+  }
+}
+
+
+void DebuggerAgentSession::Run() {
+  while (true) {
+    // Read data from the debugger front end.
+    SmartPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client_);
+    if (*message == NULL) {
+      // Session is closed.
+      agent_->SessionClosed(this);
+      return;
+    }
+
+    // Convert UTF-8 to UTF-16.
+    unibrow::Utf8InputBuffer<> buf(*message, strlen(*message));
+    int len = 0;
+    while (buf.has_more()) {
+      buf.GetNext();
+      len++;
+    }
+    int16_t* temp = NewArray<int16_t>(len + 1);
+    buf.Reset(*message, strlen(*message));
+    for (int i = 0; i < len; i++) {
+      temp[i] = buf.GetNext();
+    }
+
+    // Send the request received to the debugger.
+    v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp), len);
+    DeleteArray(temp);
+  }
+}
+
+
+void DebuggerAgentSession::DebuggerMessage(Vector<uint16_t> message) {
+  DebuggerAgentUtil::SendMessage(client_, message);
+}
+
+
+const char* DebuggerAgentUtil::kContentLength = "Content-Length";
+int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength);
+
+
+SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+  int received;
+
+  // Read header.
+  const int kHeaderBufferSize = 80;
+  char header_buffer[kHeaderBufferSize];
+  int header_buffer_position = 0;
+  char c = '\0';  // One character receive buffer.
+  char last_c = '\0';  // Previous character.
+  int content_length = 0;
+  while (!(c == '\n' && last_c == '\n')) {
+    last_c = c;
+    received = conn->Receive(&c, 1);
+    if (received <= 0) {
+      PrintF("Error %d\n", Socket::LastError());
+      return SmartPointer<char>();
+    }
+
+    // Check for end of header line.
+    if (c == '\n') {
+      // Empty header line.
+      if (header_buffer_position == 0) {
+        continue;
+      }
+
+      // Terminate header.
+      ASSERT(header_buffer_position < kHeaderBufferSize);
+      if (header_buffer_position < kHeaderBufferSize) {
+        header_buffer[header_buffer_position] = '\0';
+      }
+
+      // Split header.
+      char* key = header_buffer;
+      char* value = NULL;
+      for (int i = 0; i < header_buffer_position; i++) {
+        if (header_buffer[i] == ':') {
+          header_buffer[i] = '\0';
+          value = header_buffer + i + 1;
+          while (*value == ' ') {
+            value++;
+          }
+          break;
+        }
+      }
+
+      // Check that key is Content-Length.
+      if (strcmp(key, kContentLength) == 0) {
+        // Get the content length value if within a sensible range.
+        if (strlen(value) > 7) {
+          return SmartPointer<char>();
+        }
+        for (int i = 0; value[i] != '\0'; i++) {
+          // Bail out if illegal data.
+          if (value[i] < '0' || value[i] > '9') {
+            return SmartPointer<char>();
+          }
+          content_length = 10 * content_length + (value[i] - '0');
+        }
+      }
+
+      // Start collecting new header.
+      header_buffer_position = 0;
+    } else {
+      // Add character to header buffer (reserve room for terminating '\0').
+      if (header_buffer_position < kHeaderBufferSize - 1) {
+        header_buffer[header_buffer_position++] = c;
+      }
+    }
+  }
+
+  // Read body.
+  char* buffer = NewArray<char>(content_length + 1);
+  received = ReceiveAll(conn, buffer, content_length);
+  if (received < content_length) {
+    PrintF("Error %d\n", Socket::LastError());
+    return SmartPointer<char>();
+  }
+  buffer[content_length] = '\0';
+
+  return SmartPointer<char>(buffer);
+}
+
+
+bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+                                    const Vector<uint16_t> message) {
+  static const int kBufferSize = 80;
+  char buffer[kBufferSize];  // Sending buffer both for header and body.
+
+  // Calculate the message size in UTF-8 encoding.
+  int utf8_len = 0;
+  for (int i = 0; i < message.length(); i++) {
+    utf8_len += unibrow::Utf8::Length(message[i]);
+  }
+
+  // Send the header.
+  int len;
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "Content-Length: %d\n", utf8_len);
+  conn->Send(buffer, len);
+
+  // Terminate header with empty line.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\n");
+  conn->Send(buffer, len);
+
+  // Send message body as UTF-8.
+  int buffer_position = 0;  // Current buffer position.
+  for (int i = 0; i < message.length(); i++) {
+    // Write next UTF-8 encoded character to buffer.
+    buffer_position +=
+        unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
+    ASSERT(buffer_position < kBufferSize);
+
+    // Send buffer if full or last character is encoded.
+    if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
+      conn->Send(buffer, buffer_position);
+      buffer_position = 0;
+    }
+  }
+
+  return true;
+}
+
+
+bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+                                    const v8::Handle<v8::String> request) {
+  static const int kBufferSize = 80;
+  char buffer[kBufferSize];  // Sending buffer both for header and body.
+
+  // Convert the request to UTF-8 encoding.
+  v8::String::Utf8Value utf8_request(request);
+
+  // Send the header.
+  int len;
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+                     "Content-Length: %d\n", utf8_request.length());
+  conn->Send(buffer, len);
+
+  // Terminate header with empty line.
+  len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\n");
+  conn->Send(buffer, len);
+
+  // Send message body as UTF-8.
+  conn->Send(*utf8_request, utf8_request.length());
+
+  return true;
+}
+
+
+// Receive the full buffer before returning unless an error occurs.
+int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
+  int total_received = 0;
+  while (total_received < len) {
+    int received = conn->Receive(data + total_received, len - total_received);
+    if (received <= 0) {
+      return total_received;
+    }
+    total_received += received;
+  }
+  return total_received;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/debug-agent.h b/src/debug-agent.h
new file mode 100644
index 0000000..7ca7ec4
--- /dev/null
+++ b/src/debug-agent.h
@@ -0,0 +1,109 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_DEBUG_AGENT_H_
+#define V8_V8_DEBUG_AGENT_H_
+
+#include "../include/v8-debug.h"
+#include "platform.h"
+
+namespace v8 { namespace internal {
+
+
+// Forward declarations.
+class DebuggerAgentSession;
+
+
+// Debugger agent which starts a socket listener on the debugger port and
+// handles connection from a remote debugger.
+class DebuggerAgent: public Thread {
+ public:
+  explicit DebuggerAgent(int port)
+      : port_(port), server_(OS::CreateSocket()), terminate_(false),
+        session_access_(OS::CreateMutex()), session_(NULL) {}
+  ~DebuggerAgent() {}
+
+  void Shutdown();
+
+ private:
+  void Run();
+  void CreateSession(Socket* socket);
+  void DebuggerMessage(const uint16_t* message, int length);
+  void SessionClosed(DebuggerAgentSession* session);
+
+  int port_;  // Port to use for the agent.
+  Socket* server_;  // Server socket for listen/accept.
+  bool terminate_;  // Termination flag.
+  Mutex* session_access_;  // Mutex guarding access to session_.
+  DebuggerAgentSession* session_;  // Current active session if any.
+
+  friend class DebuggerAgentSession;
+  friend void DebuggerAgentMessageHandler(const uint16_t* message, int length,
+                                          void *data);
+
+  DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
+};
+
+
+// Debugger agent session. The session receives requests from the remote
+// debugger and sends debugger events/responses to the remote debugger.
+class DebuggerAgentSession: public Thread {
+ public:
+  DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
+      : agent_(agent), client_(client) {}
+
+  void DebuggerMessage(Vector<uint16_t> message);
+
+ private:
+  void Run();
+
+  void DebuggerMessage(Vector<char> message);
+
+  DebuggerAgent* agent_;
+  const Socket* client_;
+
+  DISALLOW_COPY_AND_ASSIGN(DebuggerAgentSession);
+};
+
+
+// Utility methods factored out to be used by the D8 shell as well.
+class DebuggerAgentUtil {
+ public:
+  static const char* kContentLength;
+  static int kContentLengthSize;
+
+  static SmartPointer<char> ReceiveMessage(const Socket* conn);
+  static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
+  static bool SendMessage(const Socket* conn,
+                          const v8::Handle<v8::String> message);
+  static int ReceiveAll(const Socket* conn, char* data, int len);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_V8_DEBUG_AGENT_H_
diff --git a/src/debug-delay.js b/src/debug-delay.js
index cfd1c7d..11796ad 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -61,6 +61,10 @@
                      Extension: 1,
                      Normal: 2 };
 
+// The different script break point types.
+Debug.ScriptBreakPointType = { ScriptId: 0,
+                               ScriptName: 1 };
+
 function ScriptTypeFlag(type) {
   return (1 << type);
 }
@@ -210,9 +214,15 @@
 
 
 // Object representing a script break point. The script is referenced by its
-// script name and the break point is represented as line and column.
-function ScriptBreakPoint(script_name, opt_line, opt_column) {
-  this.script_name_ = script_name;
+// script name or script id and the break point is represented as line and
+// column.
+function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column) {
+  this.type_ = type;
+  if (type == Debug.ScriptBreakPointType.ScriptId) {
+    this.script_id_ = script_id_or_name;
+  } else {  // type == Debug.ScriptBreakPointType.ScriptName
+    this.script_name_ = script_id_or_name;
+  }
   this.line_ = opt_line || 0;
   this.column_ = opt_column;
   this.hit_count_ = 0;
@@ -227,6 +237,16 @@
 };
 
 
+ScriptBreakPoint.prototype.type = function() {
+  return this.type_;
+};
+
+
+ScriptBreakPoint.prototype.script_id = function() {
+  return this.script_id_;
+};
+
+
 ScriptBreakPoint.prototype.script_name = function() {
   return this.script_name_;
 };
@@ -292,9 +312,13 @@
 // Check whether a script matches this script break point. Currently this is
 // only based on script name.
 ScriptBreakPoint.prototype.matchesScript = function(script) {
-  return this.script_name_ == script.name &&
-         script.line_offset <= this.line_  &&
-         this.line_ < script.line_offset + script.lineCount();
+  if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
+    return this.script_id_ == script.id;
+  } else {  // this.type_ == Debug.ScriptBreakPointType.ScriptName
+    return this.script_name_ == script.name &&
+           script.line_offset <= this.line_  &&
+           this.line_ < script.line_offset + script.lineCount();
+  }
 };
 
 
@@ -356,7 +380,8 @@
 // break points set in this script.
 function UpdateScriptBreakPoints(script) {
   for (var i = 0; i < script_break_points.length; i++) {
-    if (script_break_points[i].script_name() == script.name) {
+    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
+        script_break_points[i].script_name() == script.name) {
       script_break_points[i].set(script);
     }
   }
@@ -437,10 +462,11 @@
   return %FunctionGetScriptSourcePosition(f);
 };
 
-Debug.findFunctionSourcePosition = function(func, opt_line, opt_column) {
+
+Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
   var script = %FunctionGetScript(func);
   var script_offset = %FunctionGetScriptSourcePosition(func);
-  return script.locationFromLine(opt_line, opt_column, script_offset).position;
+  return script.locationFromLine(opt_line, opt_column, script_offset);
 }
 
 
@@ -478,8 +504,10 @@
   if (%FunctionIsAPIFunction(func)) {
     throw new Error('Cannot set break point in native code.');
   }
-  var source_position = this.findFunctionSourcePosition(func, opt_line, opt_column) -
-                        this.sourcePosition(func);
+  // Find source position relative to start of the function
+  var break_position =
+      this.findFunctionSourceLocation(func, opt_line, opt_column).position;
+  var source_position = break_position - this.sourcePosition(func);
   // Find the script for the function.
   var script = %FunctionGetScript(func);
   // Break in builtin JavaScript code is not supported.
@@ -488,15 +516,15 @@
   }
   // If the script for the function has a name convert this to a script break
   // point.
-  if (script && script.name) {
+  if (script && script.id) {
     // Adjust the source position to be script relative.
     source_position += %FunctionGetScriptSourcePosition(func);
     // Find line and column for the position in the script and set a script
     // break point from that.
     var location = script.locationFromPosition(source_position);
-    return this.setScriptBreakPoint(script.name,
-                                    location.line, location.column,
-                                    opt_condition);
+    return this.setScriptBreakPointById(script.id,
+                                        location.line, location.column,
+                                        opt_condition);
   } else {
     // Set a break point directly on the function.
     var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
@@ -573,18 +601,20 @@
 }
 
 
-// Sets a breakpoint in a script identified through script name at the
+// Sets a breakpoint in a script identified through id or name at the
 // specified source line and column within that line.
-Debug.setScriptBreakPoint = function(script_name, opt_line, opt_column, opt_condition) {
+Debug.setScriptBreakPoint = function(type, script_id_or_name,
+                                     opt_line, opt_column, opt_condition) {
   // Create script break point object.
-  var script_break_point = new ScriptBreakPoint(script_name, opt_line, opt_column);
+  var script_break_point =
+      new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column);
 
   // Assign number to the new script break point and add it.
   script_break_point.number_ = next_break_point_number++;
   script_break_point.setCondition(opt_condition);
   script_break_points.push(script_break_point);
 
-  // Run through all scripts to see it this script break point matches any
+  // Run through all scripts to see if this script break point matches any
   // loaded scripts.
   var scripts = this.scripts();
   for (var i = 0; i < scripts.length; i++) {
@@ -597,6 +627,24 @@
 }
 
 
+Debug.setScriptBreakPointById = function(script_id,
+                                         opt_line, opt_column,
+                                         opt_condition) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+                                  script_id, opt_line, opt_column,
+                                  opt_condition)
+}
+
+
+Debug.setScriptBreakPointByName = function(script_name,
+                                           opt_line, opt_column,
+                                           opt_condition) {
+  return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
+                                  script_name, opt_line, opt_column,
+                                  opt_condition)
+}
+
+
 Debug.enableScriptBreakPoint = function(break_point_number) {
   var script_break_point = this.findScriptBreakPoint(break_point_number, false);
   script_break_point.enable();
@@ -789,11 +837,7 @@
     o.body.sourceLine = this.sourceLine(),
     o.body.sourceColumn = this.sourceColumn(),
     o.body.sourceLineText = this.sourceLineText(),
-    o.body.script = { name: script.name(),
-                      lineOffset: script.lineOffset(),
-                      columnOffset: script.columnOffset(),
-                      lineCount: script.lineCount()
-                    };
+    o.body.script = MakeScriptObject_(script, false);
   }
 
   // Add an Array of break points hit if any.
@@ -886,11 +930,7 @@
     // Add script information to the event if available.
     var script = this.func().script();
     if (script) {
-      o.body.script = { name: script.name(),
-                        lineOffset: script.lineOffset(),
-                        columnOffset: script.columnOffset(),
-                        lineCount: script.lineCount()
-                      };
+      o.body.script = MakeScriptObject_(script, false);
     }
   } else {
     o.body.sourceLine = -1;
@@ -939,12 +979,7 @@
     o.event = "afterCompile";
   }
   o.body = {};
-  o.body.script = { name: this.script_.name(),
-                    lineOffset: this.script_.lineOffset(),
-                    columnOffset: this.script_.columnOffset(),
-                    lineCount: this.script_.lineCount(),
-                    source: this.script_.source()
-                   };
+  o.body.script = MakeScriptObject_(this.script_, true);
 
   return o.toJSONProtocol();
 }
@@ -975,6 +1010,20 @@
 };
 
 
+function MakeScriptObject_(script, include_source) {
+  var o = { id: script.id(),
+            name: script.name(),
+            lineOffset: script.lineOffset(),
+            columnOffset: script.columnOffset(),
+            lineCount: script.lineCount(),
+          };
+  if (include_source) {
+    o.source = script.source();
+  }
+  return o;
+};
+
+
 function DebugCommandProcessor(exec_state) {
   this.exec_state_ = exec_state;
   this.running_ = false;
@@ -1219,7 +1268,7 @@
     response.failed('Missing argument "type" or "target"');
     return;
   }
-  if (type != 'function' && type != 'script') {
+  if (type != 'function' && type != 'script' && type != 'scriptId') {
     response.failed('Illegal type "' + type + '"');
     return;
   }
@@ -1248,11 +1297,13 @@
 
     // Set function break point.
     break_point_number = Debug.setBreakPoint(f, line, column, condition);
-  } else {
+  } else if (type == 'script') {
     // set script break point.
-    break_point_number = Debug.setScriptBreakPoint(target,
-                                                   line, column,
-                                                   condition);
+    break_point_number =
+        Debug.setScriptBreakPointByName(target, line, column, condition);
+  } else {  // type == 'scriptId'.
+    break_point_number =
+        Debug.setScriptBreakPointById(target, line, column, condition);
   }
 
   // Set additional break point properties.
@@ -1270,8 +1321,13 @@
 
   // Add break point information to the response.
   if (break_point instanceof ScriptBreakPoint) {
-    response.body.type = 'script';
-    response.body.script_name = break_point.script_name();
+    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
+      response.body.type = 'scriptId';
+      response.body.script_id = break_point.script_id();
+    } else {
+      response.body.type = 'scriptName';
+      response.body.script_name = break_point.script_name();
+    }
     response.body.line = break_point.line();
     response.body.column = break_point.column();
   } else {
@@ -1604,6 +1660,7 @@
       if (scripts[i].name) {
         script.name = scripts[i].name;
       }
+      script.id = scripts[i].id;
       script.lineOffset = scripts[i].line_offset;
       script.columnOffset = scripts[i].column_offset;
       script.lineCount = scripts[i].lineCount();
diff --git a/src/debug.cc b/src/debug.cc
index f36bf2d..b704eed 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -400,12 +400,17 @@
 
 // Threading support.
 void Debug::ThreadInit() {
+  thread_local_.break_count_ = 0;
+  thread_local_.break_id_ = 0;
+  thread_local_.break_frame_id_ = StackFrame::NO_ID;
   thread_local_.last_step_action_ = StepNone;
   thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
   thread_local_.step_count_ = 0;
   thread_local_.last_fp_ = 0;
   thread_local_.step_into_fp_ = 0;
   thread_local_.after_break_target_ = 0;
+  thread_local_.debugger_entry_ = NULL;
+  thread_local_.preemption_pending_ = false;
 }
 
 
@@ -611,6 +616,13 @@
 }
 
 
+// Set the flag indicating that preemption happened during debugging.
+void Debug::PreemptionWhileInDebugger() {
+  ASSERT(InDebugger());
+  Debug::set_preemption_pending(true);
+}
+
+
 void Debug::Iterate(ObjectVisitor* v) {
   v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_entry_)));
   v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_)));
@@ -743,7 +755,7 @@
           *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
 
   // Get the break id as an object.
-  Handle<Object> break_id = Factory::NewNumberFromInt(Top::break_id());
+  Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
 
   // Call HandleBreakPointx.
   bool caught_exception = false;
@@ -873,7 +885,7 @@
 
 void Debug::FloodHandlerWithOneShot() {
   // Iterate through the JavaScript stack looking for handlers.
-  StackFrame::Id id = Top::break_frame_id();
+  StackFrame::Id id = break_frame_id();
   if (id == StackFrame::NO_ID) {
     // If there is no JavaScript stack don't do anything.
     return;
@@ -913,7 +925,7 @@
   // any. The debug frame will only be present if execution was stopped due to
   // hitting a break point. In other situations (e.g. unhandled exception) the
   // debug frame is not present.
-  StackFrame::Id id = Top::break_frame_id();
+  StackFrame::Id id = break_frame_id();
   if (id == StackFrame::NO_ID) {
     // If there is no JavaScript stack don't do anything.
     return;
@@ -1118,6 +1130,18 @@
 }
 
 
+void Debug::NewBreak(StackFrame::Id break_frame_id) {
+  thread_local_.break_frame_id_ = break_frame_id;
+  thread_local_.break_id_ = ++thread_local_.break_count_;
+}
+
+
+void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
+  thread_local_.break_frame_id_ = break_frame_id;
+  thread_local_.break_id_ = break_id;
+}
+
+
 // Handle stepping into a function.
 void Debug::HandleStepIn(Handle<JSFunction> function,
                          Address fp,
@@ -1355,6 +1379,7 @@
 void* Debugger::message_handler_data_ = NULL;
 v8::DebugHostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
 void* Debugger::host_dispatch_handler_data_ = NULL;
+DebuggerAgent* Debugger::agent_ = NULL;
 
 
 Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -1380,7 +1405,7 @@
 
 Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
   // Create the execution state object.
-  Handle<Object> break_id = Factory::NewNumberFromInt(Top::break_id());
+  Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
   const int argc = 1;
   Object** argv[argc] = { break_id.location() };
   return MakeJSObject(CStrVector("MakeExecutionState"),
@@ -1792,6 +1817,17 @@
 }
 
 
+bool Debugger::StartAgent(int port) {
+  if (Socket::Setup()) {
+    agent_ = new DebuggerAgent(port);
+    agent_->Start();
+    return true;
+  }
+
+  return false;
+}
+
+
 DebugMessageThread::DebugMessageThread()
     : host_running_(true),
       command_queue_(kQueueInitialSize),
diff --git a/src/debug.h b/src/debug.h
index 1d534d6..9f4e047 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -31,14 +31,21 @@
 #include "../include/v8-debug.h"
 #include "assembler.h"
 #include "code-stubs.h"
+#include "debug-agent.h"
 #include "execution.h"
 #include "factory.h"
 #include "platform.h"
 #include "string-stream.h"
+#include "v8threads.h"
 
 
 namespace v8 { namespace internal {
 
+
+// Forward declarations.
+class EnterDebugger;
+
+
 // Step actions. NOTE: These values are in macros.py as well.
 enum StepAction {
   StepNone = -1,  // Stepping not prepared.
@@ -165,7 +172,8 @@
   static bool Load();
   static void Unload();
   static bool IsLoaded() { return !debug_context_.is_null(); }
-  static bool InDebugger() { return Top::is_break(); }
+  static bool InDebugger() { return thread_local_.debugger_entry_ != NULL; }
+  static void PreemptionWhileInDebugger();
   static void Iterate(ObjectVisitor* v);
 
   static Object* Break(Arguments args);
@@ -210,6 +218,16 @@
   // Fast check to see if any break points are active.
   inline static bool has_break_points() { return has_break_points_; }
 
+  static void NewBreak(StackFrame::Id break_frame_id);
+  static void SetBreak(StackFrame::Id break_frame_id, int break_id);
+  static StackFrame::Id break_frame_id() {
+    return thread_local_.break_frame_id_;
+  }
+  static int break_id() { return thread_local_.break_id_; }
+
+
+
+
   static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
   static void HandleStepIn(Handle<JSFunction> function,
                            Address fp,
@@ -217,6 +235,20 @@
   static Address step_in_fp() { return thread_local_.step_into_fp_; }
   static Address* step_in_fp_addr() { return &thread_local_.step_into_fp_; }
 
+  static EnterDebugger* debugger_entry() {
+    return thread_local_.debugger_entry_;
+  }
+  static void set_debugger_entry(EnterDebugger* entry) {
+    thread_local_.debugger_entry_ = entry;
+  }
+
+  static bool preemption_pending() {
+    return thread_local_.preemption_pending_;
+  }
+  static void set_preemption_pending(bool preemption_pending) {
+    thread_local_.preemption_pending_ = preemption_pending;
+  }
+
   // Getter and setter for the disable break state.
   static bool disable_break() { return disable_break_; }
   static void set_disable_break(bool disable_break) {
@@ -312,9 +344,18 @@
   static bool break_on_exception_;
   static bool break_on_uncaught_exception_;
 
-  // Per-thread:
+  // Per-thread data.
   class ThreadLocal {
    public:
+    // Counter for generating next break id.
+    int break_count_;
+
+    // Current break id.
+    int break_id_;
+
+    // Frame id for the frame of the current break.
+    StackFrame::Id break_frame_id_;
+
     // Step action for last step performed.
     StepAction last_step_action_;
 
@@ -332,6 +373,12 @@
 
     // Storage location for jump when exiting debug break calls.
     Address after_break_target_;
+
+    // Top debugger entry.
+    EnterDebugger* debugger_entry_;
+
+    // Preemption happened while debugging.
+    bool preemption_pending_;
   };
 
   // Storage location for registers when handling debug break calls
@@ -391,6 +438,9 @@
                              Handle<Object> data,
                              bool* pending_exception);
 
+  // Start the debugger agent listening on the provided port.
+  static bool StartAgent(int port);
+
   inline static bool EventActive(v8::DebugEvent event) {
     // Currently argument event is not used.
     return !Debugger::compiling_natives_ && Debugger::debugger_active_;
@@ -419,6 +469,8 @@
   static v8::DebugHostDispatchHandler host_dispatch_handler_;
   static void* host_dispatch_handler_data_;
 
+  static DebuggerAgent* agent_;
+
   friend class DebugMessageThread;
 };
 
@@ -513,17 +565,34 @@
 // some reason could not be entered FailedToEnter will return true.
 class EnterDebugger BASE_EMBEDDED {
  public:
-  EnterDebugger() : has_js_frames_(!it_.done()) {
+  EnterDebugger()
+      : prev_(Debug::debugger_entry()),
+        has_js_frames_(!it_.done()) {
+    ASSERT(!Debug::preemption_pending());
+
+    // Link recursive debugger entry.
+    Debug::set_debugger_entry(this);
+
+    // If a preemption is pending when first entering the debugger clear it as
+    // we don't want preemption happening while executing JavaScript in the
+    // debugger. When recursively entering the debugger the preemption flag
+    // cannot be set as this is disabled while in the debugger (see
+    // RuntimePreempt).
+    if (prev_ == NULL && StackGuard::IsPreempted()) {
+      StackGuard::Continue(PREEMPT);
+    }
+    ASSERT(!StackGuard::IsPreempted());
+
     // Store the previous break id and frame id.
-    break_id_ = Top::break_id();
-    break_frame_id_ = Top::break_frame_id();
+    break_id_ = Debug::break_id();
+    break_frame_id_ = Debug::break_frame_id();
 
     // Create the new break info. If there is no JavaScript frames there is no
     // break frame id.
     if (has_js_frames_) {
-      Top::new_break(it_.frame()->id());
+      Debug::NewBreak(it_.frame()->id());
     } else {
-      Top::new_break(StackFrame::NO_ID);
+      Debug::NewBreak(StackFrame::NO_ID);
     }
 
     // Make sure that debugger is loaded and enter the debugger context.
@@ -537,7 +606,18 @@
 
   ~EnterDebugger() {
     // Restore to the previous break state.
-    Top::set_break(break_frame_id_, break_id_);
+    Debug::SetBreak(break_frame_id_, break_id_);
+
+    // Request preemption when leaving the last debugger entry and a preemption
+    // had been recorded while debugging. This is to avoid starvation in some
+    // debugging scenarios.
+    if (prev_ == NULL && Debug::preemption_pending()) {
+      StackGuard::Preempt();
+      Debug::set_preemption_pending(false);
+    }
+
+    // Leaving this debugger entry.
+    Debug::set_debugger_entry(prev_);
   }
 
   // Check whether the debugger could be entered.
@@ -547,6 +627,7 @@
   inline bool HasJavaScriptFrames() { return has_js_frames_; }
 
  private:
+  EnterDebugger* prev_;  // Previous debugger entry if entered recursively.
   JavaScriptFrameIterator it_;
   const bool has_js_frames_;  // Were there any JavaScript frames?
   StackFrame::Id break_frame_id_;  // Previous break frame id.
diff --git a/src/disasm-arm.cc b/src/disasm-arm.cc
index ff7b9ad..d19e042 100644
--- a/src/disasm-arm.cc
+++ b/src/disasm-arm.cc
@@ -829,6 +829,12 @@
 }
 
 
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // ARM does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
 const char* NameConverter::NameOfXMMRegister(int reg) const {
   UNREACHABLE();  // ARM does not have any XMM registers
   return "noxmmreg";
diff --git a/src/disasm-ia32.cc b/src/disasm-ia32.cc
index 309375b..458844e 100644
--- a/src/disasm-ia32.cc
+++ b/src/disasm-ia32.cc
@@ -65,6 +65,7 @@
   {0x85, "test", REG_OPER_OP_ORDER},
   {0x31, "xor", OPER_REG_OP_ORDER},
   {0x33, "xor", REG_OPER_OP_ORDER},
+  {0x87, "xchg", REG_OPER_OP_ORDER},
   {0x8A, "mov_b", REG_OPER_OP_ORDER},
   {0x8B, "mov", REG_OPER_OP_ORDER},
   {-1, "", UNSET_OP_ORDER}
@@ -115,6 +116,14 @@
 };
 
 
+static const char* set_conditional_mnem[] = {
+  /*0*/ "seto", "setno", "setc", "setnc",
+  /*4*/ "setz", "setnz", "setna", "seta",
+  /*8*/ "sets", "setns", "setpe", "setpo",
+  /*12*/ "setl", "setnl", "setng", "setg"
+};
+
+
 enum InstructionType {
   NO_INSTR,
   ZERO_OPERANDS_INSTR,
@@ -177,6 +186,7 @@
   SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
   SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
   SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
+  SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,");  // 0x90 is nop.
   SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
 }
 
@@ -259,6 +269,11 @@
   }
 
 
+  const char* NameOfByteCPURegister(int reg) const {
+    return converter_.NameOfByteCPURegister(reg);
+  }
+
+
   const char* NameOfXMMRegister(int reg) const {
     return converter_.NameOfXMMRegister(reg);
   }
@@ -283,8 +298,11 @@
     *base = data & 7;
   }
 
+  typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
 
+  int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
   int PrintRightOperand(byte* modrmp);
+  int PrintRightByteOperand(byte* modrmp);
   int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
   int PrintImmediateOp(byte* data);
   int F7Instruction(byte* data);
@@ -292,6 +310,7 @@
   int JumpShort(byte* data);
   int JumpConditional(byte* data, const char* comment);
   int JumpConditionalShort(byte* data, const char* comment);
+  int SetCC(byte* data);
   int FPUInstruction(byte* data);
   void AppendToBuffer(const char* format, ...);
 
@@ -315,10 +334,9 @@
   tmp_buffer_pos_ += result;
 }
 
-
-// Returns number of bytes used including the current *modrmp.
-// Writes instruction's right operand to 'tmp_buffer_'.
-int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+int DisassemblerIA32::PrintRightOperandHelper(
+    byte* modrmp,
+    RegisterNameMapping register_name) {
   int mod, regop, rm;
   get_modrm(*modrmp, &mod, &regop, &rm);
   switch (mod) {
@@ -332,20 +350,20 @@
         int scale, index, base;
         get_sib(sib, &scale, &index, &base);
         if (index == esp && base == esp && scale == 0 /*times_1*/) {
-          AppendToBuffer("[%s]", NameOfCPURegister(rm));
+          AppendToBuffer("[%s]", (this->*register_name)(rm));
           return 2;
         } else if (base == ebp) {
           int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
           AppendToBuffer("[%s*%d+0x%x]",
-                         NameOfCPURegister(index),
+                         (this->*register_name)(index),
                          1 << scale,
                          disp);
           return 6;
         } else if (index != esp && base != ebp) {
           // [base+index*scale]
           AppendToBuffer("[%s+%s*%d]",
-                         NameOfCPURegister(base),
-                         NameOfCPURegister(index),
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
                          1 << scale);
           return 2;
         } else {
@@ -353,7 +371,7 @@
           return 1;
         }
       } else {
-        AppendToBuffer("[%s]", NameOfCPURegister(rm));
+        AppendToBuffer("[%s]", (this->*register_name)(rm));
         return 1;
       }
       break;
@@ -366,11 +384,11 @@
         int disp =
             mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
         if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
-          AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+          AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
         } else {
           AppendToBuffer("[%s+%s*%d+0x%x]",
-                         NameOfCPURegister(base),
-                         NameOfCPURegister(index),
+                         (this->*register_name)(base),
+                         (this->*register_name)(index),
                          1 << scale,
                          disp);
         }
@@ -379,12 +397,12 @@
         // No sib.
         int disp =
             mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
-        AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
+        AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
         return mod == 2 ? 5 : 2;
       }
       break;
     case 3:
-      AppendToBuffer("%s", NameOfCPURegister(rm));
+      AppendToBuffer("%s", (this->*register_name)(rm));
       return 1;
     default:
       UnimplementedInstruction();
@@ -394,6 +412,17 @@
 }
 
 
+int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
+}
+
+
+int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerIA32::NameOfByteCPURegister);
+}
+
+
 // Returns number of bytes used including the current *data.
 // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
 int DisassemblerIA32::PrintOperands(const char* mnem,
@@ -575,6 +604,17 @@
 
 
 // Returns number of bytes used, including *data.
+int DisassemblerIA32::SetCC(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data+1) & 0x0F;
+  const char* mnem = set_conditional_mnem[cond];
+  AppendToBuffer("%s ", mnem);
+  PrintRightByteOperand(data+2);
+  return 3;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
   byte b1 = *data;
   byte b2 = *(data + 1);
@@ -819,6 +859,8 @@
                      f0byte == 0xB7 || f0byte == 0xAF) {
             data += 2;
             data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+          } else if ((f0byte & 0xF0) == 0x90) {
+            data += SetCC(data);
           } else {
             data += 2;
             if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
@@ -880,6 +922,16 @@
         }
         break;
 
+      case 0x80:
+        { data++;
+          AppendToBuffer("%s ", "cmpb");
+          data += PrintRightOperand(data);
+          int32_t imm = *data;
+          AppendToBuffer(",0x%x", imm);
+          data++;
+        }
+        break;
+
       case 0x88:  // 8bit, fall through
       case 0x89:  // 32bit
         { bool is_byte = *data == 0x88;
@@ -1054,12 +1106,17 @@
 
 
 static const char* cpu_regs[8] = {
-  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
+};
+
+
+static const char* byte_cpu_regs[8] = {
+  "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
 };
 
 
 static const char* xmm_regs[8] = {
-  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
 };
 
 
@@ -1081,6 +1138,12 @@
 }
 
 
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
+  return "noreg";
+}
+
+
 const char* NameConverter::NameOfXMMRegister(int reg) const {
   if (0 <= reg && reg < 8) return xmm_regs[reg];
   return "noxmmreg";
diff --git a/src/disasm.h b/src/disasm.h
index 1fd5519..6ecd1c8 100644
--- a/src/disasm.h
+++ b/src/disasm.h
@@ -39,6 +39,7 @@
  public:
   virtual ~NameConverter() {}
   virtual const char* NameOfCPURegister(int reg) const;
+  virtual const char* NameOfByteCPURegister(int reg) const;
   virtual const char* NameOfXMMRegister(int reg) const;
   virtual const char* NameOfAddress(byte* addr) const;
   virtual const char* NameOfConstant(byte* addr) const;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index af7df26..9178aed 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -99,7 +99,7 @@
   }
 }
 
-static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
+static const int kOutBufferSize = 1024 + String::kMaxShortPrintLength;
 static const int kRelocInfoPosition = 57;
 
 static int DecodeIt(FILE* f,
diff --git a/src/execution.cc b/src/execution.cc
index f721cbd..419cc92 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -523,7 +523,12 @@
 
   ContextSwitcher::PreemptionReceived();
 
-  {
+  if (Debug::InDebugger()) {
+    // If currently in the debugger don't do any actual preemption but record
+    // that preemption occurred while in the debugger.
+    Debug::PreemptionWhileInDebugger();
+  } else {
+    // Perform preemption.
     v8::Unlocker unlocker;
     Thread::YieldCPU();
   }
diff --git a/src/factory.cc b/src/factory.cc
index ec52520..ead40b4 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -153,14 +153,32 @@
 
 
 Handle<Script> Factory::NewScript(Handle<String> source) {
+  // Generate id for this script.
+  int id;
+  if (Heap::last_script_id()->IsUndefined()) {
+    // Script ids start from one.
+    id = 1;
+  } else {
+    // Increment id, wrap when positive smi is exhausted.
+    id = Smi::cast(Heap::last_script_id())->value();
+    id++;
+    if (!Smi::IsValid(id)) {
+      id = 0;
+    }
+  }
+  Heap::SetLastScriptId(Smi::FromInt(id));
+
+  // Create and initialize script object.
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(Heap::undefined_value());
+  script->set_id(Heap::last_script_id());
   script->set_line_offset(Smi::FromInt(0));
   script->set_column_offset(Smi::FromInt(0));
   script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
   script->set_wrapper(*Factory::NewProxy(0, TENURED));
   script->set_line_ends(Heap::undefined_value());
+
   return script;
 }
 
@@ -277,6 +295,11 @@
 }
 
 
+Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+  CALL_HEAP_FUNCTION(Heap::NumberFromUint32(value), Object);
+}
+
+
 Handle<JSObject> Factory::NewNeanderObject() {
   CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(Heap::neander_map()),
                      JSObject);
@@ -536,7 +559,9 @@
   // Copy the descriptors from the array.
   DescriptorWriter w(*result);
   for (DescriptorReader r(*array); !r.eos(); r.advance()) {
-    w.WriteFrom(&r);
+    if (!r.IsNullDescriptor()) {
+      w.WriteFrom(&r);
+    }
     descriptor_count++;
   }
 
@@ -826,12 +851,13 @@
 }
 
 
-void Factory::SetRegExpData(Handle<JSRegExp> regexp,
-                            JSRegExp::Type type,
-                            Handle<String> source,
-                            JSRegExp::Flags flags,
-                            Handle<Object> data) {
-  Handle<FixedArray> store = NewFixedArray(JSRegExp::kDataSize);
+void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
+                                JSRegExp::Type type,
+                                Handle<String> source,
+                                JSRegExp::Flags flags,
+                                Handle<Object> data) {
+  Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
+
   store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
   store->set(JSRegExp::kSourceIndex, *source);
   store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
@@ -839,6 +865,25 @@
   regexp->set_data(*store);
 }
 
+void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+                                    JSRegExp::Type type,
+                                    Handle<String> source,
+                                    JSRegExp::Flags flags,
+                                    int capture_count) {
+  Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
+
+  store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
+  store->set(JSRegExp::kSourceIndex, *source);
+  store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+  store->set(JSRegExp::kIrregexpASCIICodeIndex, Heap::the_hole_value());
+  store->set(JSRegExp::kIrregexpUC16CodeIndex, Heap::the_hole_value());
+  store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
+  store->set(JSRegExp::kIrregexpCaptureCountIndex,
+             Smi::FromInt(capture_count));
+  regexp->set_data(*store);
+}
+
+
 
 void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
                                 Handle<JSObject> instance,
diff --git a/src/factory.h b/src/factory.h
index f282896..754c6da 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -170,6 +170,7 @@
                                   PretenureFlag pretenure = NOT_TENURED);
 
   static Handle<Object> NewNumberFromInt(int value);
+  static Handle<Object> NewNumberFromUint(uint32_t value);
 
   // These objects are used by the api to create env-independent data
   // structures in the heap.
@@ -316,12 +317,20 @@
                                                Handle<FixedArray> keys);
 
   // Creates a new FixedArray that holds the data associated with the
-  // regexp and stores it in the regexp.
-  static void SetRegExpData(Handle<JSRegExp> regexp,
-                            JSRegExp::Type type,
-                            Handle<String> source,
-                            JSRegExp::Flags flags,
-                            Handle<Object> data);
+  // atom regexp and stores it in the regexp.
+  static void SetRegExpAtomData(Handle<JSRegExp> regexp,
+                                JSRegExp::Type type,
+                                Handle<String> source,
+                                JSRegExp::Flags flags,
+                                Handle<Object> match_pattern);
+
+  // Creates a new FixedArray that holds the data associated with the
+  // irregexp regexp and stores it in the regexp.
+  static void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
+                                    JSRegExp::Type type,
+                                    Handle<String> source,
+                                    JSRegExp::Flags flags,
+                                    int capture_count);
 
  private:
   static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 1ba1361..67ad3f2 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -151,7 +151,7 @@
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
             "print one trace line following each garbage collection")
-DEFINE_bool(collect_maps, false,
+DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 
 // ic.cc
@@ -201,7 +201,8 @@
 
 // Regexp
 DEFINE_bool(trace_regexps, false, "trace regexp execution")
-DEFINE_bool(regexp_native, true, "use native code regexp implementation (IA32 only)")
+DEFINE_bool(regexp_native, true,
+            "use native code regexp implementation (IA32 only)")
 DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
@@ -225,6 +226,10 @@
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
 DEFINE_bool(debugger, true, "Enable JavaScript debugger")
+DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
+                                    "debugger agent in another process")
+DEFINE_bool(debugger_agent, false, "Enable debugger agent")
+DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
 DEFINE_string(map_counters, false, "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
@@ -243,9 +248,6 @@
 DEFINE_bool(enable_slow_asserts, false,
             "enable asserts that are slow to execute")
 
-// code-stubs.cc
-DEFINE_bool(print_code_stubs, false, "print code stubs")
-
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(trace_codegen, false,
             "print name of functions for which code is generated")
@@ -332,6 +334,8 @@
 DEFINE_bool(sliding_state_window, false,
             "Update sliding state window counters.")
 DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(oprofile, false,
+            "Enable JIT agent for OProfile.")
 
 //
 // Disassembler only flags
@@ -343,6 +347,9 @@
 #define FLAG FLAG_READONLY
 #endif
 
+// code-stubs.cc
+DEFINE_bool(print_code_stubs, false, "print code stubs")
+
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(print_code, false, "print generated code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
diff --git a/src/frames-inl.h b/src/frames-inl.h
index b34a0ab..c9d3ab6 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -169,7 +169,8 @@
 }
 
 
-inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
+template<typename Iterator>
+inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
   // problematic that we can't use the safe-cast operator to cast to
   // the JavaScript frame type, because we may encounter arguments
@@ -180,6 +181,39 @@
 }
 
 
+template<typename Iterator>
+JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+    StackFrame::Id id) {
+  while (!done()) {
+    Advance();
+    if (frame()->id() == id) return;
+  }
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::Advance() {
+  do {
+    iterator_.Advance();
+  } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToArgumentsFrame() {
+  if (!frame()->has_adapted_arguments()) return;
+  iterator_.Advance();
+  ASSERT(iterator_.frame()->is_arguments_adaptor());
+}
+
+
+template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
+  iterator_.Reset();
+  if (!done()) Advance();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_FRAMES_INL_H_
diff --git a/src/frames.cc b/src/frames.cc
index 20a7149..3270797 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -74,6 +74,11 @@
       frame_(NULL), handler_(NULL), thread_(t) {
   Reset();
 }
+StackFrameIterator::StackFrameIterator(bool reset)
+    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+      frame_(NULL), handler_(NULL), thread_(Top::GetCurrentThread()) {
+  if (reset) Reset();
+}
 #undef INITIALIZE_SINGLETON
 
 
@@ -131,34 +136,79 @@
 // -------------------------------------------------------------------------
 
 
-JavaScriptFrameIterator::JavaScriptFrameIterator(StackFrame::Id id) {
+StackTraceFrameIterator::StackTraceFrameIterator() {
+  if (!done() && !frame()->function()->IsJSFunction()) Advance();
+}
+
+
+void StackTraceFrameIterator::Advance() {
   while (true) {
-    Advance();
-    if (frame()->id() == id) return;
+    JavaScriptFrameIterator::Advance();
+    if (done()) return;
+    if (frame()->function()->IsJSFunction()) return;
   }
 }
 
 
-void JavaScriptFrameIterator::Advance() {
-  do {
+// -------------------------------------------------------------------------
+
+
+SafeStackFrameIterator::SafeStackFrameIterator(
+    Address low_bound, Address high_bound) :
+    low_bound_(low_bound), high_bound_(high_bound),
+    is_working_iterator_(IsInBounds(low_bound, high_bound,
+                                    Top::c_entry_fp(Top::GetCurrentThread()))),
+    iteration_done_(!is_working_iterator_), iterator_(is_working_iterator_) {
+}
+
+
+void SafeStackFrameIterator::Advance() {
+  ASSERT(is_working_iterator_);
+  ASSERT(!done());
+  StackFrame* frame = iterator_.frame();
+  iteration_done_ =
+      !IsGoodStackAddress(frame->sp()) || !IsGoodStackAddress(frame->fp());
+  if (!iteration_done_) {
     iterator_.Advance();
-  } while (!iterator_.done() && !iterator_.frame()->is_java_script());
+    if (!iterator_.done()) {
+      // Check that we have actually moved to the previous frame in the stack.
+      StackFrame* prev_frame = iterator_.frame();
+      iteration_done_ =
+          prev_frame->sp() < frame->sp() || prev_frame->fp() < frame->fp();
+    }
+  }
 }
 
 
-void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
-  if (!frame()->has_adapted_arguments()) return;
-  iterator_.Advance();
-  ASSERT(iterator_.frame()->is_arguments_adaptor());
+void SafeStackFrameIterator::Reset() {
+  if (is_working_iterator_) {
+    iterator_.Reset();
+    iteration_done_ = false;
+  }
 }
 
 
-void JavaScriptFrameIterator::Reset() {
-  iterator_.Reset();
-  Advance();
+// -------------------------------------------------------------------------
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
+    Address low_bound, Address high_bound) :
+    SafeJavaScriptFrameIterator(low_bound, high_bound) {
+  if (!done() && !frame()->function()->IsJSFunction()) Advance();
 }
 
 
+void SafeStackTraceFrameIterator::Advance() {
+  while (true) {
+    SafeJavaScriptFrameIterator::Advance();
+    if (done()) return;
+    if (frame()->function()->IsJSFunction()) return;
+  }
+}
+#endif
+
+
 // -------------------------------------------------------------------------
 
 
diff --git a/src/frames.h b/src/frames.h
index c18cb74..e6dbd24 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -509,6 +509,9 @@
   // An iterator that iterates over a given thread's stack.
   explicit StackFrameIterator(ThreadLocalTop* thread);
 
+  // An iterator that conditionally resets itself on init.
+  explicit StackFrameIterator(bool reset);
+
   StackFrame* frame() const {
     ASSERT(!done());
     return frame_;
@@ -542,16 +545,21 @@
 
 
 // Iterator that supports iterating through all JavaScript frames.
-class JavaScriptFrameIterator BASE_EMBEDDED {
+template<typename Iterator>
+class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
  public:
-  JavaScriptFrameIterator() { if (!done()) Advance(); }
+  JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
 
-  explicit JavaScriptFrameIterator(ThreadLocalTop* thread) : iterator_(thread) {
+  explicit JavaScriptFrameIteratorTemp(ThreadLocalTop* thread) :
+      iterator_(thread) {
     if (!done()) Advance();
   }
 
   // Skip frames until the frame with the given id is reached.
-  explicit JavaScriptFrameIterator(StackFrame::Id id);
+  explicit JavaScriptFrameIteratorTemp(StackFrame::Id id);
+
+  explicit JavaScriptFrameIteratorTemp(Address low_bound, Address high_bound) :
+      iterator_(low_bound, high_bound) { if (!done()) Advance(); }
 
   inline JavaScriptFrame* frame() const;
 
@@ -567,10 +575,68 @@
   void Reset();
 
  private:
+  Iterator iterator_;
+};
+
+
+typedef JavaScriptFrameIteratorTemp<StackFrameIterator> JavaScriptFrameIterator;
+
+
+// NOTE: The stack trace frame iterator is an iterator that only
+// traverses proper JavaScript frames; that is JavaScript frames that
+// have proper JavaScript functions. This excludes the problematic
+// functions in runtime.js.
+class StackTraceFrameIterator: public JavaScriptFrameIterator {
+ public:
+  StackTraceFrameIterator();
+  void Advance();
+};
+
+
+class SafeStackFrameIterator BASE_EMBEDDED {
+ public:
+  explicit SafeStackFrameIterator(Address low_bound, Address high_bound);
+
+  StackFrame* frame() const {
+    ASSERT(is_working_iterator_);
+    return iterator_.frame();
+  }
+
+  bool done() const { return iteration_done_ ? true : iterator_.done(); }
+
+  void Advance();
+  void Reset();
+
+ private:
+  static bool IsInBounds(
+      Address low_bound, Address high_bound, Address addr) {
+    return low_bound <= addr && addr <= high_bound;
+  }
+  bool IsGoodStackAddress(Address addr) const {
+    return IsInBounds(low_bound_, high_bound_, addr);
+  }
+
+  Address low_bound_;
+  Address high_bound_;
+  const bool is_working_iterator_;
+  bool iteration_done_;
   StackFrameIterator iterator_;
 };
 
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
+    SafeJavaScriptFrameIterator;
+
+
+class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
+ public:
+  explicit SafeStackTraceFrameIterator(Address low_bound, Address high_bound);
+  void Advance();
+};
+#endif
+
+
 class StackFrameLocator BASE_EMBEDDED {
  public:
   // Find the nth JavaScript frame on the stack. The caller must
diff --git a/src/globals.h b/src/globals.h
index dc87ac8..584ec62 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -141,6 +141,7 @@
 
 class AccessorInfo;
 class Allocation;
+class Arguments;
 class Assembler;
 class BreakableStatement;
 class Code;
@@ -170,7 +171,6 @@
 class JSArray;
 class JSFunction;
 class JSObject;
-class LabelCollector;
 class LargeObjectSpace;
 template <typename T, class P = FreeStoreAllocationPolicy> class List;
 class LookupResult;
diff --git a/src/handles.cc b/src/handles.cc
index 3f198c8..48065dd 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -186,9 +186,7 @@
 
 
 void FlattenString(Handle<String> string) {
-  StringShape shape(*string);
-  if (string->IsFlat(shape)) return;
-  CALL_HEAP_FUNCTION_VOID(string->TryFlatten(shape));
+  CALL_HEAP_FUNCTION_VOID(string->TryFlattenIfNotFlat(StringShape(*string)));
   ASSERT(string->IsFlat(StringShape(*string)));
 }
 
@@ -343,6 +341,78 @@
 }
 
 
+// Init line_ends array with code positions of line ends inside script
+// source.
+void InitScriptLineEnds(Handle<Script> script) {
+  if (!script->line_ends()->IsUndefined()) return;
+
+  if (!script->source()->IsString()) {
+    ASSERT(script->source()->IsUndefined());
+    script->set_line_ends(*(Factory::NewJSArray(0)));
+    ASSERT(script->line_ends()->IsJSArray());
+    return;
+  }
+
+  Handle<String> src(String::cast(script->source()));
+  const int src_len = src->length();
+  Handle<String> new_line = Factory::NewStringFromAscii(CStrVector("\n"));
+
+  // Pass 1: Identify line count.
+  int line_count = 0;
+  int position = 0;
+  while (position != -1 && position < src_len) {
+    position = Runtime::StringMatch(src, new_line, position);
+    if (position != -1) {
+      position++;
+    }
+    // Even if the last line misses a line end, it is counted.
+    line_count++;
+  }
+
+  // Pass 2: Fill in line ends positions
+  Handle<FixedArray> array = Factory::NewFixedArray(line_count);
+  int array_index = 0;
+  position = 0;
+  while (position != -1 && position < src_len) {
+    position = Runtime::StringMatch(src, new_line, position);
+    // If the script does not end with a line ending, add the final end
+    // position as just past the last line ending.
+    array->set(array_index++,
+               Smi::FromInt(position != -1 ? position++ : src_len));
+  }
+  ASSERT(array_index == line_count);
+
+  Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+  script->set_line_ends(*object);
+  ASSERT(script->line_ends()->IsJSArray());
+}
+
+
+// Convert code position into line number.
+int GetScriptLineNumber(Handle<Script> script, int code_pos) {
+  InitScriptLineEnds(script);
+  AssertNoAllocation no_allocation;
+  JSArray* line_ends_array = JSArray::cast(script->line_ends());
+  const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
+
+  int line = -1;
+  if (line_ends_len > 0 &&
+      code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
+    line = 0;
+  } else {
+    for (int i = 1; i < line_ends_len; ++i) {
+      if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
+          code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
+        line = i;
+        break;
+      }
+    }
+  }
+
+  return line != -1 ? line + script->line_offset()->value() : line;
+}
+
+
 // Compute the property keys from the interceptor.
 v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
                                                  Handle<JSObject> object) {
@@ -467,7 +537,7 @@
     for (DescriptorReader r(object->map()->instance_descriptors());
          !r.eos();
          r.advance()) {
-      if (!r.IsTransition() && !r.IsDontEnum()) {
+      if (r.IsProperty() && !r.IsDontEnum()) {
         (*storage)->set(index, r.GetKey());
         (*sort_array)->set(index, Smi::FromInt(r.GetDetails().index()));
         index++;
diff --git a/src/handles.h b/src/handles.h
index 24cc097..21af7fc 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -225,6 +225,10 @@
 // if none exists.
 Handle<JSValue> GetScriptWrapper(Handle<Script> script);
 
+// Script line number computations.
+void InitScriptLineEnds(Handle<Script> script);
+int GetScriptLineNumber(Handle<Script> script, int code_position);
+
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
 v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3c9733d..6090dd4 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -211,6 +211,11 @@
 }
 
 
+void Heap::SetLastScriptId(Object* last_script_id) {
+  last_script_id_ = last_script_id;
+}
+
+
 #define GC_GREEDY_CHECK() \
   ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
 
@@ -226,43 +231,43 @@
   do {                                                                    \
     GC_GREEDY_CHECK();                                                    \
     Object* __object__ = FUNCTION_CALL;                                   \
-    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
     if (__object__->IsOutOfMemoryFailure()) {                             \
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0");      \
     }                                                                     \
-    if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY;               \
+    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
     Heap::CollectGarbage(Failure::cast(__object__)->requested(),          \
                          Failure::cast(__object__)->allocation_space());  \
     __object__ = FUNCTION_CALL;                                           \
-    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
     if (__object__->IsOutOfMemoryFailure()) {                             \
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1");      \
     }                                                                     \
-    if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY;               \
+    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
     Counters::gc_last_resort_from_handles.Increment();                    \
     Heap::CollectAllGarbage();                                            \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __object__ = FUNCTION_CALL;                                         \
     }                                                                     \
-    if (!__object__->IsFailure()) return RETURN_VALUE;                    \
+    if (!__object__->IsFailure()) RETURN_VALUE;                           \
     if (__object__->IsOutOfMemoryFailure()) {                             \
       /* TODO(1181417): Fix this. */                                      \
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");      \
     }                                                                     \
     ASSERT(!__object__->IsRetryAfterGC());                                \
-    return RETURN_EMPTY;                                                  \
+    RETURN_EMPTY;                                                         \
   } while (false)
 
 
-#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)         \
-  CALL_AND_RETRY(FUNCTION_CALL,                         \
-                 Handle<TYPE>(TYPE::cast(__object__)),  \
-                 Handle<TYPE>())
+#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE)                \
+  CALL_AND_RETRY(FUNCTION_CALL,                                \
+                 return Handle<TYPE>(TYPE::cast(__object__)),  \
+                 return Handle<TYPE>())
 
 
 #define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
-  CALL_AND_RETRY(FUNCTION_CALL, , )
+  CALL_AND_RETRY(FUNCTION_CALL, return, return)
 
 
 #ifdef DEBUG
diff --git a/src/heap.cc b/src/heap.cc
index 3a6f301..8159311 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1238,6 +1238,9 @@
   if (obj->IsFailure()) return false;
   natives_source_cache_ = FixedArray::cast(obj);
 
+  // Handling of script id generation is in Factory::NewScript.
+  last_script_id_ = undefined_value();
+
   // Initialize keyed lookup cache.
   ClearKeyedLookupCache();
 
diff --git a/src/heap.h b/src/heap.h
index 712efb9..6fb28db 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -123,7 +123,8 @@
   V(FixedArray, number_string_cache)                    \
   V(FixedArray, single_character_string_cache)          \
   V(FixedArray, natives_source_cache)                   \
-  V(Object, keyed_lookup_cache)
+  V(Object, keyed_lookup_cache)                         \
+  V(Object, last_script_id)
 
 
 #define ROOT_LIST(V)                                  \
@@ -166,8 +167,6 @@
   V(char_at_symbol, "CharAt")                                            \
   V(undefined_symbol, "undefined")                                       \
   V(value_of_symbol, "valueOf")                                          \
-  V(CreateObjectLiteralBoilerplate_symbol, "CreateObjectLiteralBoilerplate") \
-  V(CreateArrayLiteral_symbol, "CreateArrayLiteral")                     \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
@@ -692,6 +691,9 @@
   static inline void SetKeyedLookupCache(LookupCache* cache);
   static inline void ClearKeyedLookupCache();
 
+  // Update the next script id.
+  static inline void SetLastScriptId(Object* last_script_id);
+
 #ifdef DEBUG
   static void Print();
   static void PrintHandles();
diff --git a/src/ic-ia32.cc b/src/ic-ia32.cc
index 5603077..9060f2d 100644
--- a/src/ic-ia32.cc
+++ b/src/ic-ia32.cc
@@ -130,9 +130,7 @@
   __ test(value, Immediate(kSmiTagMask));
   __ j(zero, &done, not_taken);
   // Check if the value is a function.
-  __ mov(scratch, FieldOperand(value, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, JS_FUNCTION_TYPE);
+  __ CmpObjectType(value, JS_FUNCTION_TYPE, scratch);
   __ j(not_equal, &done, taken);
   // Check if the function has been loaded.
   __ mov(scratch, FieldOperand(value, JSFunction::kSharedFunctionInfoOffset));
@@ -441,9 +439,7 @@
   // Check for number.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &number, not_taken);
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ebx, HEAP_NUMBER_TYPE);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
   __ j(not_equal, &non_number, taken);
   __ bind(&number);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -491,9 +487,7 @@
   __ j(zero, miss, not_taken);
 
   // Check that the value is a JavaScript function.
-  __ mov(edx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
-  __ cmp(edx, JS_FUNCTION_TYPE);
+  __ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
   __ j(not_equal, miss, not_taken);
 
   // Check that the function has been loaded.
@@ -739,15 +733,15 @@
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address == kTestEaxByte) {
-    // Fetch the offset from the call instruction to the map cmp
+    // Fetch the offset from the test instruction to the map cmp
     // instruction.  This offset is stored in the last 4 bytes of the
     // 5 byte test instruction.
     Address offset_address = test_instruction_address + 1;
     int offset_value = *(reinterpret_cast<int*>(offset_address));
-    // Compute the map address.  The operand-immediate compare
-    // instruction is two bytes larger than a call instruction so we
-    // add 2 to get to the map address.
-    Address map_address = address + offset_value + 2;
+    // Compute the map address.  The map address is in the last 4
+    // bytes of the 7-byte operand-immediate compare instruction, so
+    // we add 3 to the offset to get the map address.
+    Address map_address = test_instruction_address + offset_value + 3;
     // patch the map check.
     (*(reinterpret_cast<Object**>(map_address))) = value;
   }
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index d6ab18f..33f6ec6 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -39,7 +39,7 @@
 namespace v8 { namespace internal {
 
 
-static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
+static unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize;
 
 
 static bool BackRefMatchesNoCase(int from,
@@ -50,8 +50,8 @@
     unibrow::uchar old_char = subject[from++];
     unibrow::uchar new_char = subject[current++];
     if (old_char == new_char) continue;
-    canonicalize.get(old_char, '\0', &old_char);
-    canonicalize.get(new_char, '\0', &new_char);
+    interp_canonicalize.get(old_char, '\0', &old_char);
+    interp_canonicalize.get(new_char, '\0', &new_char);
     if (old_char != new_char) {
       return false;
     }
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 324d0f9..7644cea 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -213,55 +213,54 @@
   Handle<Object> result;
   if (in_cache) {
     re->set_data(*cached);
-    result = re;
-  } else {
-    FlattenString(pattern);
-    ZoneScope zone_scope(DELETE_ON_EXIT);
-    RegExpCompileData parse_result;
-    FlatStringReader reader(pattern);
-    if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
-      // Throw an exception if we fail to parse the pattern.
-      ThrowRegExpException(re,
-                           pattern,
-                           parse_result.error,
-                           "malformed_regexp");
-      return Handle<Object>::null();
-    }
-
-    if (parse_result.simple && !flags.is_ignore_case()) {
-      // Parse-tree is a single atom that is equal to the pattern.
-      result = AtomCompile(re, pattern, flags, pattern);
-    } else if (parse_result.tree->IsAtom() &&
-        !flags.is_ignore_case() &&
-        parse_result.capture_count == 0) {
-      RegExpAtom* atom = parse_result.tree->AsAtom();
-      Vector<const uc16> atom_pattern = atom->data();
-      Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
-      result = AtomCompile(re, pattern, flags, atom_string);
-    } else {
-      result = IrregexpPrepare(re, pattern, flags);
-    }
-    Object* data = re->data();
-    if (data->IsFixedArray()) {
-      // If compilation succeeded then the data is set on the regexp
-      // and we can store it in the cache.
-      Handle<FixedArray> data(FixedArray::cast(re->data()));
-      CompilationCache::PutRegExp(pattern, flags, data);
-    }
+    return re;
+  }
+  FlattenString(pattern);
+  ZoneScope zone_scope(DELETE_ON_EXIT);
+  RegExpCompileData parse_result;
+  FlatStringReader reader(pattern);
+  if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
+    // Throw an exception if we fail to parse the pattern.
+    ThrowRegExpException(re,
+                         pattern,
+                         parse_result.error,
+                         "malformed_regexp");
+    return Handle<Object>::null();
   }
 
-  return result;
+  if (parse_result.simple && !flags.is_ignore_case()) {
+    // Parse-tree is a single atom that is equal to the pattern.
+    AtomCompile(re, pattern, flags, pattern);
+  } else if (parse_result.tree->IsAtom() &&
+      !flags.is_ignore_case() &&
+      parse_result.capture_count == 0) {
+    RegExpAtom* atom = parse_result.tree->AsAtom();
+    Vector<const uc16> atom_pattern = atom->data();
+    Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
+    AtomCompile(re, pattern, flags, atom_string);
+  } else {
+    IrregexpPrepare(re, pattern, flags, parse_result.capture_count);
+  }
+  ASSERT(re->data()->IsFixedArray());
+  // Compilation succeeded so the data is set on the regexp
+  // and we can store it in the cache.
+  Handle<FixedArray> data(FixedArray::cast(re->data()));
+  CompilationCache::PutRegExp(pattern, flags, data);
+
+  return re;
 }
 
 
 Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
                                 Handle<String> subject,
-                                Handle<Object> index) {
+                                int index,
+                                Handle<JSArray> last_match_info) {
   switch (regexp->TypeTag()) {
     case JSRegExp::ATOM:
-      return AtomExec(regexp, subject, index);
+      return AtomExec(regexp, subject, index, last_match_info);
     case JSRegExp::IRREGEXP: {
-      Handle<Object> result = IrregexpExec(regexp, subject, index);
+      Handle<Object> result =
+          IrregexpExec(regexp, subject, index, last_match_info);
       ASSERT(!result.is_null() || Top::has_pending_exception());
       return result;
     }
@@ -273,12 +272,14 @@
 
 
 Handle<Object> RegExpImpl::ExecGlobal(Handle<JSRegExp> regexp,
-                                Handle<String> subject) {
+                                      Handle<String> subject,
+                                      Handle<JSArray> last_match_info) {
   switch (regexp->TypeTag()) {
     case JSRegExp::ATOM:
-      return AtomExecGlobal(regexp, subject);
+      return AtomExecGlobal(regexp, subject, last_match_info);
     case JSRegExp::IRREGEXP: {
-      Handle<Object> result = IrregexpExecGlobal(regexp, subject);
+      Handle<Object> result =
+          IrregexpExecGlobal(regexp, subject, last_match_info);
       ASSERT(!result.is_null() || Top::has_pending_exception());
       return result;
     }
@@ -292,60 +293,95 @@
 // RegExp Atom implementation: Simple string search using indexOf.
 
 
-Handle<Object> RegExpImpl::AtomCompile(Handle<JSRegExp> re,
-                                       Handle<String> pattern,
-                                       JSRegExp::Flags flags,
-                                       Handle<String> match_pattern) {
-  Factory::SetRegExpData(re, JSRegExp::ATOM, pattern, flags, match_pattern);
-  return re;
+void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
+                             Handle<String> pattern,
+                             JSRegExp::Flags flags,
+                             Handle<String> match_pattern) {
+  Factory::SetRegExpAtomData(re,
+                             JSRegExp::ATOM,
+                             pattern,
+                             flags,
+                             match_pattern);
+}
+
+
+static void SetAtomLastCapture(FixedArray* array,
+                               String* subject,
+                               int from,
+                               int to) {
+  NoHandleAllocation no_handles;
+  RegExpImpl::SetLastCaptureCount(array, 2);
+  RegExpImpl::SetLastSubject(array, subject);
+  RegExpImpl::SetLastInput(array, subject);
+  RegExpImpl::SetCapture(array, 0, from);
+  RegExpImpl::SetCapture(array, 1, to);
 }
 
 
 Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
                                     Handle<String> subject,
-                                    Handle<Object> index) {
+                                    int index,
+                                    Handle<JSArray> last_match_info) {
   Handle<String> needle(String::cast(re->DataAt(JSRegExp::kAtomPatternIndex)));
 
-  uint32_t start_index;
-  if (!Array::IndexFromObject(*index, &start_index)) {
-    return Handle<Smi>(Smi::FromInt(-1));
-  }
+  uint32_t start_index = index;
 
   int value = Runtime::StringMatch(subject, needle, start_index);
   if (value == -1) return Factory::null_value();
+  ASSERT(last_match_info->HasFastElements());
 
-  Handle<FixedArray> array = Factory::NewFixedArray(2);
-  array->set(0, Smi::FromInt(value));
-  array->set(1, Smi::FromInt(value + needle->length()));
-  return Factory::NewJSArrayWithElements(array);
+  {
+    NoHandleAllocation no_handles;
+    FixedArray* array = last_match_info->elements();
+    SetAtomLastCapture(array, *subject, value, value + needle->length());
+  }
+  return last_match_info;
 }
 
 
 Handle<Object> RegExpImpl::AtomExecGlobal(Handle<JSRegExp> re,
-                                          Handle<String> subject) {
+                                          Handle<String> subject,
+                                          Handle<JSArray> last_match_info) {
   Handle<String> needle(String::cast(re->DataAt(JSRegExp::kAtomPatternIndex)));
+  ASSERT(last_match_info->HasFastElements());
   Handle<JSArray> result = Factory::NewJSArray(1);
   int index = 0;
   int match_count = 0;
   int subject_length = subject->length();
   int needle_length = needle->length();
+  int last_value = -1;
   while (true) {
+    HandleScope scope;
     int value = -1;
     if (index + needle_length <= subject_length) {
       value = Runtime::StringMatch(subject, needle, index);
     }
-    if (value == -1) break;
-    HandleScope scope;
+    if (value == -1) {
+      if (last_value != -1) {
+        Handle<FixedArray> array(last_match_info->elements());
+        SetAtomLastCapture(*array,
+                           *subject,
+                           last_value,
+                           last_value + needle->length());
+      }
+      break;
+    }
+
     int end = value + needle_length;
 
-    Handle<FixedArray> array = Factory::NewFixedArray(2);
-    array->set(0, Smi::FromInt(value));
-    array->set(1, Smi::FromInt(end));
+    // Create an array that looks like the static last_match_info array
+    // that is attached to the global RegExp object.  We will be returning
+    // an array of these.
+    Handle<FixedArray> array = Factory::NewFixedArray(kFirstCapture + 2);
+    SetCapture(*array, 0, value);
+    SetCapture(*array, 1, end);
+    SetLastCaptureCount(*array, 2);
     Handle<JSArray> pair = Factory::NewJSArrayWithElements(array);
     SetElement(result, match_count, pair);
     match_count++;
     index = end;
     if (needle_length == 0) index++;
+    last_value = value;
   }
   return result;
 }
@@ -354,23 +390,29 @@
 // Irregexp implementation.
 
 
-// Retrieves a compiled version of the regexp for either ASCII or non-ASCII
-// strings. If the compiled version doesn't already exist, it is compiled
+// Ensures that the regexp object contains a compiled version of the
+// source for either ASCII or non-ASCII strings.
+// If the compiled version doesn't already exist, it is compiled
 // from the source pattern.
-// Irregexp is not feature complete yet. If there is something in the
-// regexp that the compiler cannot currently handle, an empty
-// handle is returned, but no exception is thrown.
-static Handle<FixedArray> GetCompiledIrregexp(Handle<JSRegExp> re,
-                                              bool is_ascii) {
-  ASSERT(re->DataAt(JSRegExp::kIrregexpDataIndex)->IsFixedArray());
-  Handle<FixedArray> alternatives(
-      FixedArray::cast(re->DataAt(JSRegExp::kIrregexpDataIndex)));
-  ASSERT_EQ(2, alternatives->length());
-
-  int index = is_ascii ? 0 : 1;
-  Object* entry = alternatives->get(index);
-  if (!entry->IsNull()) {
-    return Handle<FixedArray>(FixedArray::cast(entry));
+// If compilation fails, an exception is thrown and this function
+// returns false.
+bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
+                                        bool is_ascii) {
+  int index;
+  if (is_ascii) {
+    index = JSRegExp::kIrregexpASCIICodeIndex;
+  } else {
+    index = JSRegExp::kIrregexpUC16CodeIndex;
+  }
+  Object* entry = re->DataAt(index);
+  if (!entry->IsTheHole()) {
+    // A value has already been compiled.
+    if (entry->IsJSObject()) {
+      // If it's a JS value, it's an error.
+      Top::Throw(entry);
+      return false;
+    }
+    return true;
   }
 
   // Compile the RegExp.
@@ -392,77 +434,115 @@
                          pattern,
                          compile_data.error,
                          "malformed_regexp");
-    return Handle<FixedArray>::null();
+    return false;
   }
-  Handle<FixedArray> compiled_entry =
+  RegExpEngine::CompilationResult result =
       RegExpEngine::Compile(&compile_data,
                             flags.is_ignore_case(),
                             flags.is_multiline(),
                             pattern,
                             is_ascii);
-  if (!compiled_entry.is_null()) {
-    alternatives->set(index, *compiled_entry);
+  if (result.error_message != NULL) {
+    // Unable to compile regexp.
+    Handle<JSArray> array = Factory::NewJSArray(2);
+    SetElement(array, 0, pattern);
+    SetElement(array,
+               1,
+               Factory::NewStringFromUtf8(CStrVector(result.error_message)));
+    Handle<Object> regexp_err =
+        Factory::NewSyntaxError("malformed_regexp", array);
+    Top::Throw(*regexp_err);
+    re->SetDataAt(index, *regexp_err);
+    return false;
   }
-  return compiled_entry;
+
+  NoHandleAllocation no_handles;
+
+  FixedArray* data = FixedArray::cast(re->data());
+  data->set(index, result.code);
+  int register_max = IrregexpMaxRegisterCount(data);
+  if (result.num_registers > register_max) {
+    SetIrregexpMaxRegisterCount(data, result.num_registers);
+  }
+
+  return true;
 }
 
 
-int RegExpImpl::IrregexpNumberOfCaptures(Handle<FixedArray> irre) {
-  return Smi::cast(irre->get(kIrregexpNumberOfCapturesIndex))->value();
+int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) {
+  return Smi::cast(
+      re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
 }
 
 
-int RegExpImpl::IrregexpNumberOfRegisters(Handle<FixedArray> irre) {
-  return Smi::cast(irre->get(kIrregexpNumberOfRegistersIndex))->value();
+void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
+  re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
 }
 
 
-Handle<ByteArray> RegExpImpl::IrregexpByteCode(Handle<FixedArray> irre) {
-  ASSERT(Smi::cast(irre->get(kIrregexpImplementationIndex))->value()
-      == RegExpMacroAssembler::kBytecodeImplementation);
-  return Handle<ByteArray>(ByteArray::cast(irre->get(kIrregexpCodeIndex)));
+int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
+  return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
 }
 
 
-Handle<Code> RegExpImpl::IrregexpNativeCode(Handle<FixedArray> irre) {
-  ASSERT(Smi::cast(irre->get(kIrregexpImplementationIndex))->value()
-      != RegExpMacroAssembler::kBytecodeImplementation);
-  return Handle<Code>(Code::cast(irre->get(kIrregexpCodeIndex)));
+int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
+  return Smi::cast(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
 }
 
 
-Handle<Object>RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
-                                          Handle<String> pattern,
-                                          JSRegExp::Flags flags) {
-  // Make space for ASCII and UC16 versions.
-  Handle<FixedArray> alternatives = Factory::NewFixedArray(2);
-  alternatives->set_null(0);
-  alternatives->set_null(1);
-  Factory::SetRegExpData(re, JSRegExp::IRREGEXP, pattern, flags, alternatives);
-  return re;
+ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
+  int index;
+  if (is_ascii) {
+    index = JSRegExp::kIrregexpASCIICodeIndex;
+  } else {
+    index = JSRegExp::kIrregexpUC16CodeIndex;
+  }
+  return ByteArray::cast(re->get(index));
+}
+
+
+Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
+  int index;
+  if (is_ascii) {
+    index = JSRegExp::kIrregexpASCIICodeIndex;
+  } else {
+    index = JSRegExp::kIrregexpUC16CodeIndex;
+  }
+  return Code::cast(re->get(index));
+}
+
+
+void RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
+                                 Handle<String> pattern,
+                                 JSRegExp::Flags flags,
+                                 int capture_count) {
+  // Initialize compiled code entries to the hole value.
+  Factory::SetRegExpIrregexpData(re,
+                                 JSRegExp::IRREGEXP,
+                                 pattern,
+                                 flags,
+                                 capture_count);
 }
 
 
 Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
                                         Handle<String> subject,
-                                        Handle<Object> index) {
+                                        int index,
+                                        Handle<JSArray> last_match_info) {
   ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-  ASSERT(regexp->DataAt(JSRegExp::kIrregexpDataIndex)->IsFixedArray());
 
   bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
-  Handle<FixedArray> irregexp = GetCompiledIrregexp(regexp, is_ascii);
-  if (irregexp.is_null()) {
-    // We can't handle the RegExp with IRRegExp.
+  if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
     return Handle<Object>::null();
   }
 
   // Prepare space for the return values.
-  int number_of_registers = IrregexpNumberOfRegisters(irregexp);
-  OffsetsVector offsets(number_of_registers);
+  Handle<FixedArray> re_data(FixedArray::cast(regexp->data()));
+  int number_of_capture_registers =
+      (IrregexpNumberOfCaptures(*re_data) + 1) * 2;
+  OffsetsVector offsets(number_of_capture_registers);
 
-  int num_captures = IrregexpNumberOfCaptures(irregexp);
-
-  int previous_index = static_cast<int>(DoubleToInteger(index->Number()));
+  int previous_index = index;
 
 #ifdef DEBUG
   if (FLAG_trace_regexp_bytecodes) {
@@ -476,8 +556,11 @@
     FlattenString(subject);
   }
 
-  return IrregexpExecOnce(irregexp,
-                          num_captures,
+  last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
+
+  return IrregexpExecOnce(re_data,
+                          number_of_capture_registers,
+                          last_match_info,
                           subject,
                           previous_index,
                           offsets.vector(),
@@ -486,29 +569,33 @@
 
 
 Handle<Object> RegExpImpl::IrregexpExecGlobal(Handle<JSRegExp> regexp,
-                                              Handle<String> subject) {
+                                              Handle<String> subject,
+                                              Handle<JSArray> last_match_info) {
   ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+  Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
 
   bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
-  Handle<FixedArray> irregexp = GetCompiledIrregexp(regexp, is_ascii);
-  if (irregexp.is_null()) {
+  if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
     return Handle<Object>::null();
   }
 
   // Prepare space for the return values.
-  int number_of_registers = IrregexpNumberOfRegisters(irregexp);
-  OffsetsVector offsets(number_of_registers);
+  int number_of_capture_registers =
+      (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+  OffsetsVector offsets(number_of_capture_registers);
 
   int previous_index = 0;
 
   Handle<JSArray> result = Factory::NewJSArray(0);
-  int i = 0;
+  int result_length = 0;
   Handle<Object> matches;
 
   if (!subject->IsFlat(StringShape(*subject))) {
     FlattenString(subject);
   }
 
+  last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
+
   while (true) {
     if (previous_index > subject->length() || previous_index < 0) {
       // Per ECMA-262 15.10.6.2, if the previous index is greater than the
@@ -523,8 +610,10 @@
         PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
       }
 #endif
+      HandleScope scope;
       matches = IrregexpExecOnce(irregexp,
-                                 IrregexpNumberOfCaptures(irregexp),
+                                 number_of_capture_registers,
+                                 last_match_info,
                                  subject,
                                  previous_index,
                                  offsets.vector(),
@@ -536,12 +625,25 @@
       }
 
       if (matches->IsJSArray()) {
-        SetElement(result, i, matches);
-        i++;
-        previous_index = offsets.vector()[1];
-        if (offsets.vector()[0] == offsets.vector()[1]) {
-          previous_index++;
+        // Create an array that looks like the static last_match_info array
+        // that is attached to the global RegExp object.  We will be returning
+        // an array of these.
+        Handle<FixedArray> matches_array(JSArray::cast(*matches)->elements());
+        Handle<JSArray> latest_match =
+            Factory::NewJSArray(kFirstCapture + number_of_capture_registers);
+        Handle<FixedArray> latest_match_array(latest_match->elements());
+
+        for (int i = 0; i < number_of_capture_registers; i++) {
+          SetCapture(*latest_match_array, i, GetCapture(*matches_array, i));
         }
+        SetLastCaptureCount(*latest_match_array, number_of_capture_registers);
+
+        SetElement(result, result_length, latest_match);
+        result_length++;
+        previous_index = GetCapture(*matches_array, 1);
+        if (GetCapture(*matches_array, 0) == previous_index)
+          previous_index++;
+
       } else {
         ASSERT(matches->IsNull());
         return result;
@@ -551,131 +653,125 @@
 }
 
 
-Handle<Object> RegExpImpl::IrregexpExecOnce(Handle<FixedArray> irregexp,
-                                            int num_captures,
+Handle<Object> RegExpImpl::IrregexpExecOnce(Handle<FixedArray> regexp,
+                                            int number_of_capture_registers,
+                                            Handle<JSArray> last_match_info,
                                             Handle<String> subject,
                                             int previous_index,
                                             int* offsets_vector,
                                             int offsets_vector_length) {
-  ASSERT(subject->IsFlat(StringShape(*subject)));
+  StringShape shape(*subject);
+  ASSERT(subject->IsFlat(shape));
+  bool is_ascii = shape.IsAsciiRepresentation();
   bool rc;
 
-  int tag = Smi::cast(irregexp->get(kIrregexpImplementationIndex))->value();
-
-  switch (tag) {
-    case RegExpMacroAssembler::kIA32Implementation: {
+  Handle<String> original_subject = subject;
+  if (FLAG_regexp_native) {
 #ifndef ARM
-      Handle<Code> code = IrregexpNativeCode(irregexp);
+    Handle<Code> code(IrregexpNativeCode(*regexp, is_ascii));
 
-      StringShape shape(*subject);
+    // Character offsets into string.
+    int start_offset = previous_index;
+    int end_offset = subject->length(shape);
 
-      // Character offsets into string.
-      int start_offset = previous_index;
-      int end_offset = subject->length(shape);
+    if (shape.IsCons()) {
+      subject = Handle<String>(ConsString::cast(*subject)->first());
+    } else if (shape.IsSliced()) {
+      SlicedString* slice = SlicedString::cast(*subject);
+      start_offset += slice->start();
+      end_offset += slice->start();
+      subject = Handle<String>(slice->buffer());
+    }
 
-      if (shape.IsCons()) {
-        subject = Handle<String>(ConsString::cast(*subject)->first());
-      } else if (shape.IsSliced()) {
-        SlicedString* slice = SlicedString::cast(*subject);
-        start_offset += slice->start();
-        end_offset += slice->start();
-        subject = Handle<String>(slice->buffer());
+    // String is now either Sequential or External
+    StringShape flatshape(*subject);
+    bool is_ascii = flatshape.IsAsciiRepresentation();
+    int char_size_shift = is_ascii ? 0 : 1;
+
+    RegExpMacroAssemblerIA32::Result res;
+
+    if (flatshape.IsExternal()) {
+      const byte* address;
+      if (is_ascii) {
+        ExternalAsciiString* ext = ExternalAsciiString::cast(*subject);
+        address = reinterpret_cast<const byte*>(ext->resource()->data());
+      } else {
+        ExternalTwoByteString* ext = ExternalTwoByteString::cast(*subject);
+        address = reinterpret_cast<const byte*>(ext->resource()->data());
       }
+      res = RegExpMacroAssemblerIA32::Execute(
+          *code,
+          const_cast<Address*>(&address),
+          start_offset << char_size_shift,
+          end_offset << char_size_shift,
+          offsets_vector,
+          previous_index == 0);
+    } else {  // Sequential string
+      ASSERT(StringShape(*subject).IsSequential());
+      Address char_address =
+          is_ascii ? SeqAsciiString::cast(*subject)->GetCharsAddress()
+                   : SeqTwoByteString::cast(*subject)->GetCharsAddress();
+      int byte_offset = char_address - reinterpret_cast<Address>(*subject);
+      res = RegExpMacroAssemblerIA32::Execute(
+          *code,
+          reinterpret_cast<Address*>(subject.location()),
+          byte_offset + (start_offset << char_size_shift),
+          byte_offset + (end_offset << char_size_shift),
+          offsets_vector,
+          previous_index == 0);
+    }
 
-      // String is now either Sequential or External
-      StringShape flatshape(*subject);
-      bool is_ascii = flatshape.IsAsciiRepresentation();
-      int char_size_shift = is_ascii ? 0 : 1;
+    if (res == RegExpMacroAssemblerIA32::EXCEPTION) {
+      ASSERT(Top::has_pending_exception());
+      return Handle<Object>::null();
+    }
+    rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
 
-      RegExpMacroAssemblerIA32::Result res;
-
-      if (flatshape.IsExternal()) {
-        const byte* address;
-        if (is_ascii) {
-          ExternalAsciiString* ext = ExternalAsciiString::cast(*subject);
-          address = reinterpret_cast<const byte*>(ext->resource()->data());
-        } else {
-          ExternalTwoByteString* ext = ExternalTwoByteString::cast(*subject);
-          address = reinterpret_cast<const byte*>(ext->resource()->data());
-        }
-        res = RegExpMacroAssemblerIA32::Execute(
-            *code,
-            const_cast<Address*>(&address),
-            start_offset << char_size_shift,
-            end_offset << char_size_shift,
-            offsets_vector,
-            previous_index == 0);
-      } else {  // Sequential string
-        ASSERT(StringShape(*subject).IsSequential());
-        Address char_address =
-            is_ascii ? SeqAsciiString::cast(*subject)->GetCharsAddress()
-                     : SeqTwoByteString::cast(*subject)->GetCharsAddress();
-        int byte_offset = char_address - reinterpret_cast<Address>(*subject);
-        res = RegExpMacroAssemblerIA32::Execute(
-            *code,
-            reinterpret_cast<Address*>(subject.location()),
-            byte_offset + (start_offset << char_size_shift),
-            byte_offset + (end_offset << char_size_shift),
-            offsets_vector,
-            previous_index == 0);
-      }
-
-      if (res == RegExpMacroAssemblerIA32::EXCEPTION) {
-        ASSERT(Top::has_pending_exception());
-        return Handle<Object>::null();
-      }
-      rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
-
-      if (rc) {
-        // Capture values are relative to start_offset only.
-        for (int i = 0; i < offsets_vector_length; i++) {
-          if (offsets_vector[i] >= 0) {
-            offsets_vector[i] += previous_index;
-          }
+    if (rc) {
+      // Capture values are relative to start_offset only.
+      for (int i = 0; i < offsets_vector_length; i++) {
+        if (offsets_vector[i] >= 0) {
+          offsets_vector[i] += previous_index;
         }
       }
-      break;
+    }
+  } else {
 #else
-      UNIMPLEMENTED();
-      rc = false;
-      break;
+  // Unimplemented on ARM, fall through to bytecode.
+  }
+  {
 #endif
+    for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+      offsets_vector[i] = -1;
     }
-    case RegExpMacroAssembler::kBytecodeImplementation: {
-      for (int i = (num_captures + 1) * 2 - 1; i >= 0; i--) {
-        offsets_vector[i] = -1;
-      }
-      Handle<ByteArray> byte_codes = IrregexpByteCode(irregexp);
+    Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
 
-      rc = IrregexpInterpreter::Match(byte_codes,
-                                      subject,
-                                      offsets_vector,
-                                      previous_index);
-      break;
-    }
-    case RegExpMacroAssembler::kARMImplementation:
-    default:
-      UNREACHABLE();
-      rc = false;
-      break;
+    rc = IrregexpInterpreter::Match(byte_codes,
+                                    subject,
+                                    offsets_vector,
+                                    previous_index);
   }
 
   if (!rc) {
     return Factory::null_value();
   }
 
-  Handle<FixedArray> array = Factory::NewFixedArray(2 * (num_captures+1));
+  FixedArray* array = last_match_info->elements();
+  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
   // The captures come in (start, end+1) pairs.
-  for (int i = 0; i < 2 * (num_captures + 1); i += 2) {
-    array->set(i, Smi::FromInt(offsets_vector[i]));
-    array->set(i + 1, Smi::FromInt(offsets_vector[i + 1]));
+  for (int i = 0; i < number_of_capture_registers; i += 2) {
+    SetCapture(array, i, offsets_vector[i]);
+    SetCapture(array, i + 1, offsets_vector[i + 1]);
   }
-  return Factory::NewJSArrayWithElements(array);
+  SetLastCaptureCount(array, number_of_capture_registers);
+  SetLastSubject(array, *original_subject);
+  SetLastInput(array, *original_subject);
+  return last_match_info;
 }
 
 
 // -------------------------------------------------------------------
-// Implmentation of the Irregexp regular expression engine.
+// Implementation of the Irregexp regular expression engine.
 //
 // The Irregexp regular expression engine is intended to be a complete
 // implementation of ECMAScript regular expressions.  It generates either
@@ -892,10 +988,10 @@
     return next_register_++;
   }
 
-  Handle<FixedArray> Assemble(RegExpMacroAssembler* assembler,
-                              RegExpNode* start,
-                              int capture_count,
-                              Handle<String> pattern);
+  RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
+                                           RegExpNode* start,
+                                           int capture_count,
+                                           Handle<String> pattern);
 
   inline void AddWork(RegExpNode* node) { work_list_->Add(node); }
 
@@ -940,15 +1036,8 @@
 };
 
 
-static Handle<FixedArray> IrregexpRegExpTooBig(Handle<String> pattern) {
-  Handle<JSArray> array = Factory::NewJSArray(2);
-  SetElement(array, 0, pattern);
-  const char* message = "RegExp too big";
-  SetElement(array, 1, Factory::NewStringFromUtf8(CStrVector(message)));
-  Handle<Object> regexp_err =
-      Factory::NewSyntaxError("malformed_regexp", array);
-  Top::Throw(*regexp_err);
-  return Handle<FixedArray>();
+static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
+  return RegExpEngine::CompilationResult("RegExp too big");
 }
 
 
@@ -966,7 +1055,7 @@
 }
 
 
-Handle<FixedArray> RegExpCompiler::Assemble(
+RegExpEngine::CompilationResult RegExpCompiler::Assemble(
     RegExpMacroAssembler* macro_assembler,
     RegExpNode* start,
     int capture_count,
@@ -988,24 +1077,17 @@
   while (!work_list.is_empty()) {
     work_list.RemoveLast()->Emit(this, &new_trace);
   }
-  if (reg_exp_too_big_) return IrregexpRegExpTooBig(pattern);
-  Handle<FixedArray> array =
-      Factory::NewFixedArray(RegExpImpl::kIrregexpDataLength);
-  array->set(RegExpImpl::kIrregexpImplementationIndex,
-             Smi::FromInt(macro_assembler_->Implementation()));
-  array->set(RegExpImpl::kIrregexpNumberOfRegistersIndex,
-             Smi::FromInt(next_register_));
-  array->set(RegExpImpl::kIrregexpNumberOfCapturesIndex,
-             Smi::FromInt(capture_count));
+  if (reg_exp_too_big_) return IrregexpRegExpTooBig();
+
   Handle<Object> code = macro_assembler_->GetCode(pattern);
-  array->set(RegExpImpl::kIrregexpCodeIndex, *code);
+
   work_list_ = NULL;
 #ifdef DEBUG
   if (FLAG_trace_regexp_assembler) {
     delete macro_assembler_;
   }
 #endif
-  return array;
+  return RegExpEngine::CompilationResult(*code, next_register_);
 }
 
 
@@ -3723,9 +3805,6 @@
   //               |
   //   [if r >= f] \----> ...
   //
-  //
-  // TODO(someone): clear captures on repetition and handle empty
-  //   matches.
 
   // 15.10.2.5 RepeatMatcher algorithm.
   // The parser has already eliminated the case where max is 0.  In the case
@@ -4592,13 +4671,13 @@
 }
 
 
-Handle<FixedArray> RegExpEngine::Compile(RegExpCompileData* data,
-                                         bool ignore_case,
-                                         bool is_multiline,
-                                         Handle<String> pattern,
-                                         bool is_ascii) {
+RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
+                                                      bool ignore_case,
+                                                      bool is_multiline,
+                                                      Handle<String> pattern,
+                                                      bool is_ascii) {
   if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
-    return IrregexpRegExpTooBig(pattern);
+    return IrregexpRegExpTooBig();
   }
   RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii);
   // Wrap the body of the regexp in capture #0.
diff --git a/src/jsregexp.h b/src/jsregexp.h
index fbacff3..bb28f06 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -51,6 +51,7 @@
   // Parses the RegExp pattern and prepares the JSRegExp object with
   // generic data and choice of implementation - as well as what
   // the implementation wants to store in the data field.
+  // Returns an empty handle if compilation fails.
   static Handle<Object> Compile(Handle<JSRegExp> re,
                                 Handle<String> pattern,
                                 Handle<String> flags);
@@ -59,38 +60,46 @@
   // This function calls the garbage collector if necessary.
   static Handle<Object> Exec(Handle<JSRegExp> regexp,
                              Handle<String> subject,
-                             Handle<Object> index);
+                             int index,
+                             Handle<JSArray> lastMatchInfo);
 
   // Call RegExp.prototyp.exec(string) in a loop.
   // Used by String.prototype.match and String.prototype.replace.
   // This function calls the garbage collector if necessary.
   static Handle<Object> ExecGlobal(Handle<JSRegExp> regexp,
-                                   Handle<String> subject);
+                                   Handle<String> subject,
+                                   Handle<JSArray> lastMatchInfo);
 
   // Prepares a JSRegExp object with Irregexp-specific data.
-  static Handle<Object> IrregexpPrepare(Handle<JSRegExp> re,
-                                        Handle<String> pattern,
-                                        JSRegExp::Flags flags);
+  static void IrregexpPrepare(Handle<JSRegExp> re,
+                              Handle<String> pattern,
+                              JSRegExp::Flags flags,
+                              int capture_register_count);
 
 
-  static Handle<Object> AtomCompile(Handle<JSRegExp> re,
-                                    Handle<String> pattern,
-                                    JSRegExp::Flags flags,
-                                    Handle<String> match_pattern);
+  static void AtomCompile(Handle<JSRegExp> re,
+                          Handle<String> pattern,
+                          JSRegExp::Flags flags,
+                          Handle<String> match_pattern);
+
   static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
                                  Handle<String> subject,
-                                 Handle<Object> index);
+                                 int index,
+                                 Handle<JSArray> lastMatchInfo);
 
   static Handle<Object> AtomExecGlobal(Handle<JSRegExp> regexp,
-                                       Handle<String> subject);
+                                       Handle<String> subject,
+                                       Handle<JSArray> lastMatchInfo);
 
   // Execute an Irregexp bytecode pattern.
   static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
                                      Handle<String> subject,
-                                     Handle<Object> index);
+                                     int index,
+                                     Handle<JSArray> lastMatchInfo);
 
   static Handle<Object> IrregexpExecGlobal(Handle<JSRegExp> regexp,
-                                           Handle<String> subject);
+                                           Handle<String> subject,
+                                           Handle<JSArray> lastMatchInfo);
 
   static void NewSpaceCollectionPrologue();
   static void OldSpaceCollectionPrologue();
@@ -101,26 +110,54 @@
   static Handle<String> StringToTwoByte(Handle<String> pattern);
   static Handle<String> CachedStringToTwoByte(Handle<String> pattern);
 
-  static const int kIrregexpImplementationIndex = 0;
-  static const int kIrregexpNumberOfCapturesIndex = 1;
-  static const int kIrregexpNumberOfRegistersIndex = 2;
-  static const int kIrregexpCodeIndex = 3;
-  static const int kIrregexpDataLength = 4;
+  // Offsets in the lastMatchInfo array.
+  static const int kLastCaptureCount = 0;
+  static const int kLastSubject = 1;
+  static const int kLastInput = 2;
+  static const int kFirstCapture = 1;
+  static const int kLastMatchOverhead = 3;
+
+  static int GetCapture(FixedArray* array, int index) {
+    return Smi::cast(array->get(index + kFirstCapture))->value();
+  }
+
+  static void SetLastCaptureCount(FixedArray* array, int to) {
+    array->set(kLastCaptureCount, Smi::FromInt(to));
+  }
+
+  static void SetLastSubject(FixedArray* array, String* to) {
+    int capture_count = GetLastCaptureCount(array);
+    array->set(capture_count + kLastSubject, to);
+  }
+
+  static void SetLastInput(FixedArray* array, String* to) {
+    int capture_count = GetLastCaptureCount(array);
+    array->set(capture_count + kLastInput, to);
+  }
+
+  static void SetCapture(FixedArray* array, int index, int to) {
+    array->set(index + kFirstCapture, Smi::FromInt(to));
+  }
 
  private:
   static String* last_ascii_string_;
   static String* two_byte_cached_string_;
 
-  static int IrregexpNumberOfCaptures(Handle<FixedArray> re);
-  static int IrregexpNumberOfRegisters(Handle<FixedArray> re);
-  static Handle<ByteArray> IrregexpByteCode(Handle<FixedArray> re);
-  static Handle<Code> IrregexpNativeCode(Handle<FixedArray> re);
+  static bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
+
+  static int IrregexpMaxRegisterCount(FixedArray* re);
+  static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
+  static int IrregexpNumberOfCaptures(FixedArray* re);
+  static int IrregexpNumberOfRegisters(FixedArray* re);
+  static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
+  static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
 
   // On a successful match, the result is a JSArray containing
   // captured positions. On a failure, the result is the null value.
   // Returns an empty handle in case of an exception.
   static Handle<Object> IrregexpExecOnce(Handle<FixedArray> regexp,
                                          int num_captures,
+                                         Handle<JSArray> lastMatchInfo,
                                          Handle<String> subject16,
                                          int previous_index,
                                          int* ovector,
@@ -134,6 +171,10 @@
                               int character_position,
                               int utf8_position);
 
+  // Used to access the lastMatchInfo array.
+  static int GetLastCaptureCount(FixedArray* array) {
+    return Smi::cast(array->get(kLastCaptureCount))->value();
+  }
   // A one element cache of the last utf8_subject string and its length.  The
   // subject JS String object is cached in the heap.  We also cache a
   // translation between position and utf8 position.
@@ -1319,11 +1360,25 @@
 
 class RegExpEngine: public AllStatic {
  public:
-  static Handle<FixedArray> Compile(RegExpCompileData* input,
-                                    bool ignore_case,
-                                    bool multiline,
-                                    Handle<String> pattern,
-                                    bool is_ascii);
+  struct CompilationResult {
+    explicit CompilationResult(const char* error_message)
+        : error_message(error_message),
+          code(Heap::the_hole_value()),
+          num_registers(0) {}
+    CompilationResult(Object* code, int registers)
+      : error_message(NULL),
+        code(code),
+        num_registers(registers) {}
+    const char* error_message;
+    Object* code;
+    int num_registers;
+  };
+
+  static CompilationResult Compile(RegExpCompileData* input,
+                                   bool ignore_case,
+                                   bool multiline,
+                                   Handle<String> pattern,
+                                   bool is_ascii);
 
   static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
 };
diff --git a/src/jump-target-arm.cc b/src/jump-target-arm.cc
new file mode 100644
index 0000000..b7aad93
--- /dev/null
+++ b/src/jump-target-arm.cc
@@ -0,0 +1,258 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ masm_->
+
+void JumpTarget::Jump() {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen_->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is an expected frame to merge to.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen_->frame()->MergeTo(entry_frame_);
+    cgen_->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else {
+    // Forward jump.  The current frame is added to the end of the list
+    // of frames reaching the target block and a jump to the merge code
+    // is emitted.
+    AddReachingFrame(cgen_->frame());
+    RegisterFile empty;
+    cgen_->SetFrame(NULL, &empty);
+    __ jmp(&merge_labels_.last());
+  }
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Branch(Condition cc, Hint ignored) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  if (is_bound()) {
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.  We negate the condition and emit the merge code
+    // here.
+    //
+    // TODO(210): we should try to avoid negating the condition in the
+    // case where there is no merge code to emit.  Otherwise, we emit
+    // a branch around an unconditional jump.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    Label original_fall_through;
+    __ b(NegateCondition(cc), &original_fall_through);
+    // Swap the current frame for a copy of it, saving non-frame
+    // register reference counts and invalidating all non-frame register
+    // references except the reserved ones on the backward edge.
+    VirtualFrame* original_frame = cgen_->frame();
+    VirtualFrame* working_frame = new VirtualFrame(original_frame);
+    RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+    cgen_->SetFrame(working_frame, &non_frame_registers);
+
+    working_frame->MergeTo(entry_frame_);
+    cgen_->DeleteFrame();
+    __ jmp(&entry_label_);
+
+    // Restore the frame and its associated non-frame registers.
+    cgen_->SetFrame(original_frame, &non_frame_registers);
+    __ bind(&original_fall_through);
+  } else {
+    // Forward branch.  A copy of the current frame is added to the end
+    // of the list of frames reaching the target block and a branch to
+    // the merge code is emitted.
+    AddReachingFrame(new VirtualFrame(cgen_->frame()));
+    __ b(cc, &merge_labels_.last());
+  }
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  cgen_->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+  target_frame->Adjust(1);
+  AddReachingFrame(target_frame);
+  __ bl(&merge_labels_.last());
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Bind(int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+
+  // Compute the frame to use for entry to the block.
+  ComputeEntryFrame(mergable_elements);
+
+  if (is_linked()) {
+    // There were forward jumps.  Handle merging the reaching frames
+    // and possible fall through to the entry frame.
+
+    // Some moves required to merge to an expected frame require
+    // purely frame state changes, and do not require any code
+    // generation.  Perform those first to increase the possibility of
+    // finding equal frames below.
+    if (cgen_->has_valid_frame()) {
+      cgen_->frame()->PrepareMergeTo(entry_frame_);
+    }
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+    }
+
+    // If there is a fall through to the jump target and it needs
+    // merge code, process it first.
+    if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
+      // Loop over all the reaching frames, looking for any that can
+      // share merge code with this one.
+      for (int i = 0; i < reaching_frames_.length(); i++) {
+        if (cgen_->frame()->Equals(reaching_frames_[i])) {
+          // Set the reaching frames element to null to avoid
+          // processing it later, and then bind its entry label.
+          delete reaching_frames_[i];
+          reaching_frames_[i] = NULL;
+          __ bind(&merge_labels_[i]);
+        }
+      }
+
+      // Emit the merge code.
+      cgen_->frame()->MergeTo(entry_frame_);
+    }
+
+    // Loop over the (non-null) reaching frames and process any that
+    // need merge code.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      VirtualFrame* frame = reaching_frames_[i];
+      if (frame != NULL && !frame->Equals(entry_frame_)) {
+        // Set the reaching frames element to null to avoid processing
+        // it later.  Do not delete it as it is needed for merging.
+        reaching_frames_[i] = NULL;
+
+        // If the code generator has a current frame (a fall-through
+        // or a previously merged frame), insert a jump around the
+        // merge code we are about to generate.
+        if (cgen_->has_valid_frame()) {
+          cgen_->DeleteFrame();
+          __ jmp(&entry_label_);
+        }
+
+        // Set the frame to merge as the code generator's current
+        // frame and bind its merge label.
+        RegisterFile reserved_registers = RegisterAllocator::Reserved();
+        cgen_->SetFrame(frame, &reserved_registers);
+        __ bind(&merge_labels_[i]);
+
+        // Loop over the remaining (non-null) reaching frames, looking
+        // for any that can share merge code with this one.
+        for (int j = i + 1; j < reaching_frames_.length(); j++) {
+          VirtualFrame* other = reaching_frames_[j];
+          if (other != NULL && frame->Equals(other)) {
+            delete other;
+            reaching_frames_[j] = NULL;
+            __ bind(&merge_labels_[j]);
+          }
+        }
+
+        // Emit the merge code.
+        cgen_->frame()->MergeTo(entry_frame_);
+      }
+    }
+
+    // The code generator may not have a current frame if there was no
+    // fall through and none of the reaching frames needed merging.
+    // In that case, clone the entry frame as the current frame.
+    if (!cgen_->has_valid_frame()) {
+      RegisterFile reserved_registers = RegisterAllocator::Reserved();
+      cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+    }
+
+    // There is certainly a current frame equal to the entry frame.
+    // Bind the entry frame label.
+    __ bind(&entry_label_);
+
+    // There may be unprocessed reaching frames that did not need
+    // merge code.  Bind their merge labels to be the same as the
+    // entry label.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (reaching_frames_[i] != NULL) {
+        delete reaching_frames_[i];
+        __ bind(&merge_labels_[i]);
+      }
+    }
+
+    // All the reaching frames except the one that is the current
+    // frame (if it is one of the reaching frames) have been deleted.
+    reaching_frames_.Clear();
+    merge_labels_.Clear();
+
+  } else {
+    // There were no forward jumps.  The current frame is merged to
+    // the entry frame.
+    cgen_->frame()->MergeTo(entry_frame_);
+    __ bind(&entry_label_);
+  }
+
+  is_linked_ = false;
+  is_bound_ = true;
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/jump-target-ia32.cc b/src/jump-target-ia32.cc
new file mode 100644
index 0000000..12eb26f
--- /dev/null
+++ b/src/jump-target-ia32.cc
@@ -0,0 +1,258 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ masm_->
+
+void JumpTarget::Jump() {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen_->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is an expected frame to merge to.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen_->frame()->MergeTo(entry_frame_);
+    cgen_->DeleteFrame();
+    __ jmp(&entry_label_);
+  } else {
+    // Forward jump.  The current frame is added to the end of the list
+    // of frames reaching the target block and a jump to the merge code
+    // is emitted.
+    AddReachingFrame(cgen_->frame());
+    RegisterFile empty;
+    cgen_->SetFrame(NULL, &empty);
+    __ jmp(&merge_labels_.last());
+  }
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Branch(Condition cc, Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  if (is_bound()) {
+    // Backward branch.  We have an expected frame to merge to on the
+    // backward edge.  We negate the condition and emit the merge code
+    // here.
+    //
+    // TODO(210): we should try to avoid negating the condition in the
+    // case where there is no merge code to emit.  Otherwise, we emit
+    // a branch around an unconditional jump.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    Label original_fall_through;
+    __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
+    // Swap the current frame for a copy of it, saving non-frame
+    // register reference counts and invalidating all non-frame register
+    // references except the reserved ones on the backward edge.
+    VirtualFrame* original_frame = cgen_->frame();
+    VirtualFrame* working_frame = new VirtualFrame(original_frame);
+    RegisterFile non_frame_registers = RegisterAllocator::Reserved();
+    cgen_->SetFrame(working_frame, &non_frame_registers);
+
+    working_frame->MergeTo(entry_frame_);
+    cgen_->DeleteFrame();
+    __ jmp(&entry_label_);
+
+    // Restore the frame and its associated non-frame registers.
+    cgen_->SetFrame(original_frame, &non_frame_registers);
+    __ bind(&original_fall_through);
+  } else {
+    // Forward branch.  A copy of the current frame is added to the end
+    // of the list of frames reaching the target block and a branch to
+    // the merge code is emitted.
+    AddReachingFrame(new VirtualFrame(cgen_->frame()));
+    __ j(cc, &merge_labels_.last(), hint);
+  }
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally.  We
+  // fully spill the frame before making the call.  The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address.  The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  cgen_->frame()->SpillAll();
+  VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+  target_frame->Adjust(1);
+  AddReachingFrame(target_frame);
+  __ call(&merge_labels_.last());
+
+  is_linked_ = !is_bound_;
+}
+
+
+void JumpTarget::Bind(int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+
+  // Compute the frame to use for entry to the block.
+  ComputeEntryFrame(mergable_elements);
+
+  if (is_linked()) {
+    // There were forward jumps.  Handle merging the reaching frames
+    // and possible fall through to the entry frame.
+
+    // Some moves required to merge to an expected frame require
+    // purely frame state changes, and do not require any code
+    // generation.  Perform those first to increase the possibility of
+    // finding equal frames below.
+    if (cgen_->has_valid_frame()) {
+      cgen_->frame()->PrepareMergeTo(entry_frame_);
+    }
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+    }
+
+    // If there is a fall through to the jump target and it needs
+    // merge code, process it first.
+    if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
+      // Loop over all the reaching frames, looking for any that can
+      // share merge code with this one.
+      for (int i = 0; i < reaching_frames_.length(); i++) {
+        if (cgen_->frame()->Equals(reaching_frames_[i])) {
+          // Set the reaching frames element to null to avoid
+          // processing it later, and then bind its entry label.
+          delete reaching_frames_[i];
+          reaching_frames_[i] = NULL;
+          __ bind(&merge_labels_[i]);
+        }
+      }
+
+      // Emit the merge code.
+      cgen_->frame()->MergeTo(entry_frame_);
+    }
+
+    // Loop over the (non-null) reaching frames and process any that
+    // need merge code.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      VirtualFrame* frame = reaching_frames_[i];
+      if (frame != NULL && !frame->Equals(entry_frame_)) {
+        // Set the reaching frames element to null to avoid processing
+        // it later.  Do not delete it as it is needed for merging.
+        reaching_frames_[i] = NULL;
+
+        // If the code generator has a current frame (a fall-through
+        // or a previously merged frame), insert a jump around the
+        // merge code we are about to generate.
+        if (cgen_->has_valid_frame()) {
+          cgen_->DeleteFrame();
+          __ jmp(&entry_label_);
+        }
+
+        // Set the frame to merge as the code generator's current
+        // frame and bind its merge label.
+        RegisterFile reserved_registers = RegisterAllocator::Reserved();
+        cgen_->SetFrame(frame, &reserved_registers);
+        __ bind(&merge_labels_[i]);
+
+        // Loop over the remaining (non-null) reaching frames, looking
+        // for any that can share merge code with this one.
+        for (int j = i + 1; j < reaching_frames_.length(); j++) {
+          VirtualFrame* other = reaching_frames_[j];
+          if (other != NULL && frame->Equals(other)) {
+            delete other;
+            reaching_frames_[j] = NULL;
+            __ bind(&merge_labels_[j]);
+          }
+        }
+
+        // Emit the merge code.
+        cgen_->frame()->MergeTo(entry_frame_);
+      }
+    }
+
+    // The code generator may not have a current frame if there was no
+    // fall through and none of the reaching frames needed merging.
+    // In that case, clone the entry frame as the current frame.
+    if (!cgen_->has_valid_frame()) {
+      RegisterFile reserved_registers = RegisterAllocator::Reserved();
+      cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+    }
+
+    // There is certainly a current frame equal to the entry frame.
+    // Bind the entry frame label.
+    __ bind(&entry_label_);
+
+    // There may be unprocessed reaching frames that did not need
+    // merge code.  Bind their merge labels to be the same as the
+    // entry label.
+    for (int i = 0; i < reaching_frames_.length(); i++) {
+      if (reaching_frames_[i] != NULL) {
+        delete reaching_frames_[i];
+        __ bind(&merge_labels_[i]);
+      }
+    }
+
+    // All the reaching frames except the one that is the current
+    // frame (if it is one of the reaching frames) have been deleted.
+    reaching_frames_.Clear();
+    merge_labels_.Clear();
+
+  } else {
+    // There were no forward jumps.  The current frame is merged to
+    // the entry frame.
+    cgen_->frame()->MergeTo(entry_frame_);
+    __ bind(&entry_label_);
+  }
+
+  is_linked_ = false;
+  is_bound_ = true;
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/jump-target.cc b/src/jump-target.cc
new file mode 100644
index 0000000..8b8d4a0
--- /dev/null
+++ b/src/jump-target.cc
@@ -0,0 +1,657 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "jump-target.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
+    : cgen_(cgen),
+      direction_(direction),
+      reaching_frames_(0),
+      merge_labels_(0),
+      entry_frame_(NULL),
+      is_bound_(false),
+      is_linked_(false) {
+  ASSERT(cgen != NULL);
+  masm_ = cgen->masm();
+}
+
+
+JumpTarget::JumpTarget()
+    : cgen_(NULL),
+      masm_(NULL),
+      direction_(FORWARD_ONLY),
+      reaching_frames_(0),
+      merge_labels_(0),
+      entry_frame_(NULL),
+      is_bound_(false),
+      is_linked_(false) {
+}
+
+
+void JumpTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
+  ASSERT(cgen != NULL);
+  ASSERT(cgen_ == NULL);
+  cgen_ = cgen;
+  masm_ = cgen->masm();
+  direction_ = direction;
+}
+
+
+void JumpTarget::Unuse() {
+  ASSERT(!is_linked());
+#ifdef DEBUG
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL);
+  }
+#endif
+  delete entry_frame_;
+
+  Reset();
+}
+
+
+void JumpTarget::Reset() {
+  reaching_frames_.Clear();
+  merge_labels_.Clear();
+  entry_frame_ = NULL;
+  entry_label_.Unuse();
+  is_bound_ = false;
+  is_linked_ = false;
+}
+
+
+FrameElement* JumpTarget::Combine(FrameElement* left, FrameElement* right) {
+  // Given a pair of non-null frame element pointers, return one of
+  // them as an entry frame candidate or null if they are
+  // incompatible.
+
+  // If either is invalid, so is the result.
+  if (!left->is_valid()) return left;
+  if (!right->is_valid()) return right;
+
+  // If they have the same value, the result is the same.  (Exception:
+  // bidirectional frames cannot have constants or copies.)  If either
+  // is unsynced, so is the result.
+  if (left->is_memory() && right->is_memory()) return left;
+
+  if (left->is_register() && right->is_register() &&
+      left->reg().is(right->reg())) {
+    if (!left->is_synced()) {
+      return left;
+    } else {
+      return right;
+    }
+  }
+
+  if (direction_ == FORWARD_ONLY &&
+      left->is_constant() &&
+      right->is_constant() &&
+      left->handle().is_identical_to(right->handle())) {
+    if (!left->is_synced()) {
+      return left;
+    } else {
+      return right;
+    }
+  }
+
+  if (direction_ == FORWARD_ONLY &&
+      left->is_copy() &&
+      right->is_copy() &&
+      left->index() == right->index()) {
+    if (!left->is_synced()) {
+      return left;
+    } else {
+      return right;
+    }
+  }
+
+  // Otherwise they are incompatible and we will reallocate them.
+  return NULL;
+}
+
+
+void JumpTarget::ComputeEntryFrame(int mergable_elements) {
+  // Given: a collection of frames reaching by forward CFG edges
+  // (including the code generator's current frame) and the
+  // directionality of the block.  Compute: an entry frame for the
+  // block.
+
+  // Choose an initial frame, either the code generator's current
+  // frame if there is one, or the first reaching frame if not.
+  VirtualFrame* initial_frame = cgen_->frame();
+  int start_index = 0;  // Begin iteration with the 1st reaching frame.
+  if (initial_frame == NULL) {
+    initial_frame = reaching_frames_[0];
+    start_index = 1;  // Begin iteration with the 2nd reaching frame.
+  }
+
+  // A list of pointers to frame elements in the entry frame.  NULL
+  // indicates that the element has not yet been determined.
+  int length = initial_frame->elements_.length();
+  List<FrameElement*> elements(length);
+
+  // Convert the number of mergable elements (counted from the top
+  // down) to a frame high-water mark (counted from the bottom up).
+  // Elements strictly above the high-water index will be mergable in
+  // entry frames for bidirectional jump targets.
+  int high_water_mark = (mergable_elements == kAllElements)
+      ? VirtualFrame::kIllegalIndex  // All frame indices are above this.
+      : length - mergable_elements - 1;  // Top index if m_e == 0.
+
+  // Initially populate the list of elements based on the initial
+  // frame.
+  for (int i = 0; i < length; i++) {
+    FrameElement element = initial_frame->elements_[i];
+    // We do not allow copies or constants in bidirectional frames.
+    if (direction_ == BIDIRECTIONAL &&
+        i > high_water_mark &&
+        (element.is_constant() || element.is_copy())) {
+      elements.Add(NULL);
+    } else {
+      elements.Add(&initial_frame->elements_[i]);
+    }
+  }
+
+  // Compute elements based on the other reaching frames.
+  if (start_index < reaching_frames_.length()) {
+    for (int i = 0; i < length; i++) {
+      for (int j = start_index; j < reaching_frames_.length(); j++) {
+        FrameElement* element = elements[i];
+
+        // Element computation is monotonic: new information will not
+        // change our decision about undetermined or invalid elements.
+        if (element == NULL || !element->is_valid()) break;
+
+        elements[i] = Combine(element, &reaching_frames_[j]->elements_[i]);
+      }
+    }
+  }
+
+  // Compute the registers already reserved by values in the frame.
+  // Count the reserved registers to avoid using them.
+  RegisterFile frame_registers = RegisterAllocator::Reserved();
+  for (int i = 0; i < length; i++) {
+    FrameElement* element = elements[i];
+    if (element != NULL && element->is_register()) {
+      frame_registers.Use(element->reg());
+    }
+  }
+
+  // Build the new frame.  The frame already has memory elements for
+  // the parameters (including the receiver) and the return address.
+  // We will fill it up with memory elements.
+  entry_frame_ = new VirtualFrame(cgen_);
+  while (entry_frame_->elements_.length() < length) {
+    entry_frame_->elements_.Add(FrameElement::MemoryElement());
+  }
+
+
+  // Copy the already-determined frame elements to the entry frame,
+  // and allocate any still-undetermined frame elements to registers
+  // or memory, from the top down.
+  for (int i = length - 1; i >= 0; i--) {
+    if (elements[i] == NULL) {
+      // If the value is synced on all frames, put it in memory.  This
+      // costs nothing at the merge code but will incur a
+      // memory-to-register move when the value is needed later.
+      bool is_synced = initial_frame->elements_[i].is_synced();
+      int j = start_index;
+      while (is_synced && j < reaching_frames_.length()) {
+        is_synced = reaching_frames_[j]->elements_[i].is_synced();
+        j++;
+      }
+      // There is nothing to be done if the elements are all synced.
+      // It is already recorded as a memory element.
+      if (is_synced) continue;
+
+      // Choose an available register.  Prefer ones that the element
+      // is already occupying on some reaching frame.
+      RegisterFile candidate_registers;
+      int max_count = kMinInt;
+      int best_reg_code = no_reg.code_;
+
+      // Consider the initial frame.
+      FrameElement element = initial_frame->elements_[i];
+      if (element.is_register() &&
+          !frame_registers.is_used(element.reg())) {
+        candidate_registers.Use(element.reg());
+        max_count = 1;
+        best_reg_code = element.reg().code();
+      }
+      // Consider the other frames.
+      for (int j = start_index; j < reaching_frames_.length(); j++) {
+        element = reaching_frames_[j]->elements_[i];
+        if (element.is_register() &&
+            !frame_registers.is_used(element.reg())) {
+          candidate_registers.Use(element.reg());
+          if (candidate_registers.count(element.reg()) > max_count) {
+            max_count = candidate_registers.count(element.reg());
+            best_reg_code = element.reg().code();
+          }
+        }
+      }
+      // If there was no preferred choice consider any free register.
+      if (best_reg_code == no_reg.code_) {
+        for (int j = 0; j < kNumRegisters; j++) {
+          if (!frame_registers.is_used(j)) {
+            best_reg_code = j;
+            break;
+          }
+        }
+      }
+
+      // If there was a register choice, use it.  If not, do nothing
+      // (the element is already recorded as being in memory).
+      if (best_reg_code != no_reg.code_) {
+        Register reg = { best_reg_code };
+        frame_registers.Use(reg);
+        entry_frame_->elements_[i] =
+            FrameElement::RegisterElement(reg,
+                                          FrameElement::NOT_SYNCED);
+      }
+    } else {
+      // The element is already determined.
+      entry_frame_->elements_[i] = *elements[i];
+    }
+  }
+
+  // Fill in the other fields of the entry frame.
+  entry_frame_->local_count_ = initial_frame->local_count_;
+  entry_frame_->frame_pointer_ = initial_frame->frame_pointer_;
+
+  // The stack pointer is at the highest synced element or the base of
+  // the expression stack.
+  int stack_pointer = length - 1;
+  while (stack_pointer >= entry_frame_->expression_base_index() &&
+         !entry_frame_->elements_[stack_pointer].is_synced()) {
+    stack_pointer--;
+  }
+  entry_frame_->stack_pointer_ = stack_pointer;
+
+  // Unuse the reserved registers---they do not actually appear in
+  // the entry frame.
+  RegisterAllocator::UnuseReserved(&frame_registers);
+  entry_frame_->frame_registers_ = frame_registers;
+}
+
+
+void JumpTarget::Jump(Result* arg) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  cgen_->frame()->Push(arg);
+  Jump();
+}
+
+
+void JumpTarget::Jump(Result* arg0, Result* arg1) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  cgen_->frame()->Push(arg0);
+  cgen_->frame()->Push(arg1);
+  Jump();
+}
+
+
+void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  cgen_->frame()->Push(arg0);
+  cgen_->frame()->Push(arg1);
+  cgen_->frame()->Push(arg2);
+  Jump();
+}
+
+
+#ifdef DEBUG
+#define DECLARE_ARGCHECK_VARS(name)                                \
+  Result::Type name##_type = name->type();                         \
+  Register name##_reg = name->is_register() ? name->reg() : no_reg
+
+#define ASSERT_ARGCHECK(name)                                \
+  ASSERT(name->type() == name##_type);                       \
+  ASSERT(!name->is_register() || name->reg().is(name##_reg))
+
+#else
+#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
+
+#define ASSERT_ARGCHECK(name) do {} while (false)
+#endif
+
+void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg);
+
+  cgen_->frame()->Push(arg);
+  Branch(cc, hint);
+  *arg = cgen_->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg);
+}
+
+
+void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->frame() != NULL);
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg0);
+  DECLARE_ARGCHECK_VARS(arg1);
+
+  cgen_->frame()->Push(arg0);
+  cgen_->frame()->Push(arg1);
+  Branch(cc, hint);
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg0);
+  ASSERT_ARGCHECK(arg1);
+}
+
+
+void JumpTarget::Branch(Condition cc,
+                        Result* arg0,
+                        Result* arg1,
+                        Result* arg2,
+                        Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->frame() != NULL);
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg0);
+  DECLARE_ARGCHECK_VARS(arg1);
+  DECLARE_ARGCHECK_VARS(arg2);
+
+  cgen_->frame()->Push(arg0);
+  cgen_->frame()->Push(arg1);
+  cgen_->frame()->Push(arg2);
+  Branch(cc, hint);
+  *arg2 = cgen_->frame()->Pop();
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg0);
+  ASSERT_ARGCHECK(arg1);
+  ASSERT_ARGCHECK(arg2);
+}
+
+
+void JumpTarget::Branch(Condition cc,
+                        Result* arg0,
+                        Result* arg1,
+                        Result* arg2,
+                        Result* arg3,
+                        Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->frame() != NULL);
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg0);
+  DECLARE_ARGCHECK_VARS(arg1);
+  DECLARE_ARGCHECK_VARS(arg2);
+  DECLARE_ARGCHECK_VARS(arg3);
+
+  cgen_->frame()->Push(arg0);
+  cgen_->frame()->Push(arg1);
+  cgen_->frame()->Push(arg2);
+  cgen_->frame()->Push(arg3);
+  Branch(cc, hint);
+  *arg3 = cgen_->frame()->Pop();
+  *arg2 = cgen_->frame()->Pop();
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg0);
+  ASSERT_ARGCHECK(arg1);
+  ASSERT_ARGCHECK(arg2);
+  ASSERT_ARGCHECK(arg3);
+}
+
+#undef DECLARE_ARGCHECK_VARS
+#undef ASSERT_ARGCHECK
+
+
+void JumpTarget::Bind(Result* arg, int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+
+  if (cgen_->has_valid_frame()) {
+    cgen_->frame()->Push(arg);
+  }
+  Bind(mergable_elements);
+  *arg = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+
+  if (cgen_->has_valid_frame()) {
+    cgen_->frame()->Push(arg0);
+    cgen_->frame()->Push(arg1);
+  }
+  Bind(mergable_elements);
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0,
+                      Result* arg1,
+                      Result* arg2,
+                      int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+
+  if (cgen_->has_valid_frame()) {
+    cgen_->frame()->Push(arg0);
+    cgen_->frame()->Push(arg1);
+    cgen_->frame()->Push(arg2);
+  }
+  Bind(mergable_elements);
+  *arg2 = cgen_->frame()->Pop();
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0,
+                      Result* arg1,
+                      Result* arg2,
+                      Result* arg3,
+                      int mergable_elements) {
+  ASSERT(cgen_ != NULL);
+
+  if (cgen_->has_valid_frame()) {
+    cgen_->frame()->Push(arg0);
+    cgen_->frame()->Push(arg1);
+    cgen_->frame()->Push(arg2);
+    cgen_->frame()->Push(arg3);
+  }
+  Bind(mergable_elements);
+  *arg3 = cgen_->frame()->Pop();
+  *arg2 = cgen_->frame()->Pop();
+  *arg1 = cgen_->frame()->Pop();
+  *arg0 = cgen_->frame()->Pop();
+}
+
+
+void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
+  ASSERT(reaching_frames_.length() == merge_labels_.length());
+  Label fresh;
+  merge_labels_.Add(fresh);
+  reaching_frames_.Add(frame);
+}
+
+
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
+
+void BreakTarget::Initialize(CodeGenerator* cgen, Directionality direction) {
+  JumpTarget::Initialize(cgen, direction);
+  ASSERT(cgen_->has_valid_frame());
+  expected_height_ = cgen_->frame()->height();
+}
+
+
+void BreakTarget::CopyTo(BreakTarget* destination) {
+  ASSERT(destination != NULL);
+  destination->cgen_ = cgen_;
+  destination->masm_ = masm_;
+  destination->direction_ = direction_;
+  destination->reaching_frames_.Clear();
+  destination->merge_labels_.Clear();
+  ASSERT(reaching_frames_.length() == merge_labels_.length());
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    destination->reaching_frames_.Add(reaching_frames_[i]);
+    destination->merge_labels_.Add(merge_labels_[i]);
+  }
+  destination->entry_frame_ = entry_frame_;
+  destination->entry_label_ = entry_label_;
+  destination->is_bound_ = is_bound_;
+  destination->is_linked_ = is_linked_;
+  destination->expected_height_ = expected_height_;
+}
+
+
+void BreakTarget::Jump() {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  // This is a break target so drop leftover statement state from the
+  // frame before merging.
+  cgen_->frame()->ForgetElements(cgen_->frame()->height() - expected_height_);
+  JumpTarget::Jump();
+}
+
+
+void BreakTarget::Branch(Condition cc, Hint hint) {
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+
+  int count = cgen_->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using
+    // JumpTarget::Branch's negate and branch.  This gives us a hook
+    // to remove statement state from the frame.
+    JumpTarget fall_through(cgen_);
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    Jump();  // May emit merge code here.
+    fall_through.Bind();
+  } else {
+    JumpTarget::Branch(cc, hint);
+  }
+}
+
+
+void BreakTarget::Bind(int mergable_elements) {
+#ifdef DEBUG
+  ASSERT(mergable_elements == kAllElements);
+  ASSERT(cgen_ != NULL);
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+
+  // This is a break target so drop leftover statement state from the
+  // frame before merging.
+  if (cgen_->has_valid_frame()) {
+    int count = cgen_->frame()->height() - expected_height_;
+    cgen_->frame()->ForgetElements(count);
+  }
+  JumpTarget::Bind(mergable_elements);
+}
+
+
+// -------------------------------------------------------------------------
+// ShadowTarget implementation.
+
+ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
+  ASSERT(shadowed != NULL);
+  other_target_ = shadowed;
+
+#ifdef DEBUG
+  is_shadowing_ = true;
+#endif
+  // While shadowing this shadow target saves the state of the original.
+  shadowed->CopyTo(this);
+
+  // The original's state is reset.  We do not Unuse it because that
+  // would delete the expected frame and assert that the target is not
+  // linked.
+  shadowed->Reset();
+  ASSERT(cgen_ != NULL);
+  ASSERT(cgen_->has_valid_frame());
+  shadowed->set_expected_height(cgen_->frame()->height());
+
+  // Setting the code generator to null prevents the shadow target from
+  // being used until shadowing stops.
+  cgen_ = NULL;
+  masm_ = NULL;
+}
+
+
+void ShadowTarget::StopShadowing() {
+  ASSERT(is_shadowing_);
+
+  // This target does not have a valid code generator yet.
+  cgen_ = other_target_->code_generator();
+  ASSERT(cgen_ != NULL);
+  masm_ = cgen_->masm();
+
+  // The states of this target, which was shadowed, and the original
+  // target, which was shadowing, are swapped.
+  BreakTarget temp;
+  other_target_->CopyTo(&temp);
+  CopyTo(other_target_);
+  temp.CopyTo(this);
+  temp.Reset();  // So the destructor does not deallocate virtual frames.
+
+#ifdef DEBUG
+  is_shadowing_ = false;
+#endif
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/jump-target.h b/src/jump-target.h
new file mode 100644
index 0000000..bf4fc99
--- /dev/null
+++ b/src/jump-target.h
@@ -0,0 +1,314 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_H_
+#define V8_JUMP_TARGET_H_
+
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code.  It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths.  When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame.  For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
+
+class JumpTarget : public Malloced {  // Shadows are dynamically allocated.
+ public:
+  // Forward-only jump targets can only be reached by forward CFG edges.
+  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+  // Construct a jump target with a given code generator used to generate
+  // code and to provide access to a current frame.
+  explicit JumpTarget(CodeGenerator* cgen,
+                      Directionality direction = FORWARD_ONLY);
+
+  // Construct a jump target without a code generator.  A code
+  // generator must be supplied before using the jump target as a
+  // label.  This is useful, eg, when break targets are embedded in
+  // AST nodes.
+  JumpTarget();
+
+  // Supply a code generator and directionality to an already
+  // constructed jump target.  This function expects to be given a
+  // non-null code generator, and to be called only when the code
+  // generator is not yet set.
+  virtual void Initialize(CodeGenerator* cgen,
+                          Directionality direction = FORWARD_ONLY);
+
+  virtual ~JumpTarget() { Unuse(); }
+
+  // Treat the jump target as a fresh one.  The state is reset and
+  // pointed-to virtual frames are deallocated.  There should be no
+  // dangling jumps to the target.
+  void Unuse();
+
+  // Reset the internal state of this jump target.  Pointed-to virtual
+  // frames are not deallocated and dangling jumps to the target are
+  // left dangling.
+  void Reset();
+
+  // Accessors.
+  CodeGenerator* code_generator() const { return cgen_; }
+
+  Label* entry_label() { return &entry_label_; }
+
+  VirtualFrame* entry_frame() const { return entry_frame_; }
+  void set_entry_frame(VirtualFrame* frame) {
+    entry_frame_ = frame;
+  }
+
+  void make_bidirectional() { direction_ = BIDIRECTIONAL; }
+
+  // Predicates testing the state of the encapsulated label.
+  bool is_bound() const { return is_bound_; }
+  bool is_linked() const { return is_linked_; }
+  bool is_unused() const { return !is_bound() && !is_linked(); }
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+  void Jump(Result* arg);
+  void Jump(Result* arg0, Result* arg1);
+  void Jump(Result* arg0, Result* arg1, Result* arg2);
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+  void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+  void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
+  void Branch(Condition cc,
+              Result* arg0,
+              Result* arg1,
+              Result* arg2,
+              Hint hint = no_hint);
+  void Branch(Condition cc,
+              Result* arg0,
+              Result* arg1,
+              Result* arg2,
+              Result* arg3,
+              Hint hint = no_hint);
+
+  // Bind a jump target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  //
+  // The number of mergable elements is a number of frame elements
+  // counting from the top down which must be "mergable" (not
+  // constants or copies) in the entry frame at the jump target.
+  // Backward jumps to the target must contain the same constants and
+  // sharing as the entry frame, except for the mergable elements.
+  //
+  // A mergable elements argument of kAllElements indicates that all
+  // frame elements must be mergable.  Mergable elements are ignored
+  // completely for forward-only jump targets.
+  virtual void Bind(int mergable_elements = kAllElements);
+  void Bind(Result* arg, int mergable_elements = kAllElements);
+  void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
+  void Bind(Result* arg0,
+            Result* arg1,
+            Result* arg2,
+            int mergable_elements = kAllElements);
+  void Bind(Result* arg0,
+            Result* arg1,
+            Result* arg2,
+            Result* arg3,
+            int mergable_elements = kAllElements);
+
+  // Emit a call to a jump target.  There must be a current frame at
+  // the call.  The frame at the target is the same as the current
+  // frame except for an extra return address on top of it.  The frame
+  // after the call is the same as the frame before the call.
+  void Call();
+
+  static const int kAllElements = -1;  // Not a valid number of elements.
+
+ protected:
+  // The code generator gives access to its current frame.
+  CodeGenerator* cgen_;
+
+  // Used to emit code.
+  MacroAssembler* masm_;
+
+  // Directionality flag set at initialization time.
+  Directionality direction_;
+
+  // A list of frames reaching this block via forward jumps.
+  List<VirtualFrame*> reaching_frames_;
+
+  // A parallel list of labels for merge code.
+  List<Label> merge_labels_;
+
+  // The frame used on entry to the block and expected at backward
+  // jumps to the block.  Set when the jump target is bound, but may
+  // or may not be set for forward-only blocks.
+  VirtualFrame* entry_frame_;
+
+  // The actual entry label of the block.
+  Label entry_label_;
+
+  // A target is bound if its Bind member function has been called.
+  // It is linked if it is not bound but its Jump, Branch, or Call
+  // member functions have been called.
+  bool is_bound_;
+  bool is_linked_;
+
+ private:
+  // Add a virtual frame reaching this labeled block via a forward
+  // jump, and a fresh label for its merge code.
+  void AddReachingFrame(VirtualFrame* frame);
+
+  // Choose an element from a pair of frame elements to be in the
+  // expected frame.  Return null if they are incompatible.
+  FrameElement* Combine(FrameElement* left, FrameElement* right);
+
+  // Compute a frame to use for entry to this block.  Mergable
+  // elements is as described for the Bind function.
+  void ComputeEntryFrame(int mergable_elements);
+
+  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (eg, for/in or
+// try/finally).  They know the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
+
+class BreakTarget : public JumpTarget {
+ public:
+  // Construct a break target without a code generator.  A code
+  // generator must be supplied before using the break target as a
+  // label.  This is useful, eg, when break targets are embedded in AST
+  // nodes.
+  BreakTarget() {}
+
+  // Supply a code generator, expected expression stack height, and
+  // directionality to an already constructed break target.  This
+  // function expects to be given a non-null code generator, and to be
+  // called only when the code generator is not yet set.
+  virtual void Initialize(CodeGenerator* cgen,
+                          Directionality direction = FORWARD_ONLY);
+
+  // Copy the state of this break target to the destination.  The
+  // lists of forward-reaching frames and merge-point labels are
+  // copied.  All virtual frame pointers are copied, not the
+  // pointed-to frames.  The previous state of the destination is
+  // overwritten, without deallocating pointed-to virtual frames.
+  void CopyTo(BreakTarget* destination);
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+
+  // Bind a break target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind(int mergable_elements = kAllElements);
+
+  // Setter for expected height.
+  void set_expected_height(int expected) { expected_height_ = expected; }
+
+ private:
+  // The expected height of the expression stack where the target will
+  // be bound, statically known at initialization time.
+  int expected_height_;
+
+  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Shadow break targets
+//
+// A shadow break target represents a break target that is temporarily
+// shadowed by another one (represented by the original during
+// shadowing).  They are used to catch jumps to labels in certain
+// contexts, e.g. try blocks.  After shadowing ends, the formerly
+// shadowed target is again represented by the original and the
+// ShadowTarget can be used as a jump target in its own right,
+// representing the formerly shadowing target.
+
+class ShadowTarget : public BreakTarget {
+ public:
+  // Construct a shadow jump target.  After construction the shadow
+  // target object holds the state of the original target, and the
+  // original target is actually a fresh one that intercepts control
+  // flow intended for the shadowed one.
+  explicit ShadowTarget(BreakTarget* shadowed);
+
+  virtual ~ShadowTarget() {
+    ASSERT(!is_shadowing_);
+  }
+
+  // End shadowing.  After shadowing ends, the original jump target
+  // again gives access to the formerly shadowed target and the shadow
+  // target object gives access to the formerly shadowing target.
+  void StopShadowing();
+
+  // During shadowing, the currently shadowing target.  After
+  // shadowing, the target that was shadowed.
+  BreakTarget* other_target() const { return other_target_; }
+
+ private:
+  // During shadowing, the currently shadowing target.  After
+  // shadowing, the target that was shadowed.
+  BreakTarget* other_target_;
+
+#ifdef DEBUG
+  bool is_shadowing_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_JUMP_TARGET_H_
diff --git a/src/list-inl.h b/src/list-inl.h
index 790bdd8..6dbd214 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -34,26 +34,29 @@
 
 
 template<typename T, class P>
-T& List<T, P>::Add(const T& element) {
-  if (length_ >= capacity_) {
+void List<T, P>::Add(const T& element) {
+  if (length_ < capacity_) {
+    data_[length_++] = element;
+  } else {
     // Grow the list capacity by 50%, but make sure to let it grow
     // even when the capacity is zero (possible initial case).
     int new_capacity = 1 + capacity_ + (capacity_ >> 1);
     T* new_data = NewData(new_capacity);
     memcpy(new_data, data_, capacity_ * sizeof(T));
+    // Since the element reference could be an element of the list,
+    // assign it to the new backing store before deleting the old.
+    new_data[length_++] = element;
     DeleteData(data_);
     data_ = new_data;
     capacity_ = new_capacity;
   }
-  return data_[length_++] = element;
 }
 
 
 template<typename T, class P>
-Vector<T> List<T, P>::AddBlock(const T& element, int count) {
+Vector<T> List<T, P>::AddBlock(T value, int count) {
   int start = length_;
-  for (int i = 0; i < count; i++)
-    Add(element);
+  for (int i = 0; i < count; i++) Add(value);
   return Vector<T>(&data_[start], count);
 }
 
diff --git a/src/list.h b/src/list.h
index 2f8aa90..15f31fc 100644
--- a/src/list.h
+++ b/src/list.h
@@ -53,14 +53,16 @@
   INLINE(void* operator new(size_t size)) { return P::New(size); }
   INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
 
+  // Returns a reference to the element at index i.  This reference is
+  // not safe to use after operations that can change the list's
+  // backing store (eg, Add).
   inline T& operator[](int i) const  {
     ASSERT(0 <= i && i < length_);
     return data_[i];
   }
-  inline T& at(int i) const  { return this->operator[](i); }
-  INLINE(const T& last() const)  {
-    ASSERT(!is_empty());
-    return this->at(length_ - 1);
+  inline T& at(int i) const  { return operator[](i); }
+  inline T& last() const {
+    return at(length_ - 1);
   }
 
   INLINE(bool is_empty() const) { return length_ == 0; }
@@ -72,16 +74,17 @@
 
   // Adds a copy of the given 'element' to the end of the list,
   // expanding the list if necessary.
-  T& Add(const T& element);
+  void Add(const T& element);
 
   // Added 'count' elements with the value 'value' and returns a
   // vector that allows access to the elements.  The vector is valid
   // until the next change is made to this list.
-  Vector<T> AddBlock(const T& value, int count);
+  Vector<T> AddBlock(T value, int count);
 
   // Removes the i'th element without deleting it even if T is a
   // pointer type; moves all elements above i "down". Returns the
-  // removed element.
+  // removed element.  This function's complexity is linear in the
+  // size of the list.
   T Remove(int i);
 
   // Removes the last element without deleting it even if T is a
diff --git a/src/log.cc b/src/log.cc
index ed8f475..805bb51 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -137,16 +137,32 @@
 // StackTracer implementation
 //
 void StackTracer::Trace(TickSample* sample) {
-  // Assuming that stack grows from lower addresses
-  if (sample->state != GC
-      && (sample->sp < sample->fp && sample->fp < low_stack_bound_)) {
-    sample->InitStack(1);
+  if (sample->state == GC) {
+    sample->frames_count = 0;
+    return;
+  }
+
+  // If c_entry_fp is available, this means that we are inside a C++
+  // function and sample->fp value isn't reliable due to FPO.
+  if (Top::c_entry_fp(Top::GetCurrentThread()) != NULL) {
+    SafeStackTraceFrameIterator it(
+        reinterpret_cast<Address>(sample->sp),
+        reinterpret_cast<Address>(low_stack_bound_));
+    int i = 0;
+    while (!it.done() && i < TickSample::kMaxFramesCount) {
+      sample->stack[i++] = it.frame()->pc();
+      it.Advance();
+    }
+    sample->frames_count = i;
+  } else if (sample->sp < sample->fp && sample->fp < low_stack_bound_) {
+    // The check assumes that stack grows from lower addresses.
     sample->stack[0] = Memory::Address_at(
         (Address)(sample->fp + StandardFrameConstants::kCallerPCOffset));
+    sample->frames_count = 1;
   } else {
-    // GC runs or FP seems to be in some intermediate state,
+    // FP seems to be in some intermediate state,
     // better discard this sample
-    sample->InitStack(0);
+    sample->frames_count = 0;
   }
 }
 
@@ -687,24 +703,13 @@
 }
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
-int Logger::CodeObjectSize(Code* code) {
-  // Check that the assumptions about the layout of the code object holds.
-  ASSERT_EQ(reinterpret_cast<unsigned int>(code->instruction_start()) -
-            reinterpret_cast<unsigned int>(code->address()),
-            Code::kHeaderSize);
-  return code->instruction_size() + Code::kHeaderSize;
-}
-#endif
-
-
 void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (logfile_ == NULL || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("code-creation,%s,0x%x,%d,\"", tag,
              reinterpret_cast<unsigned int>(code->address()),
-             CodeObjectSize(code));
+             code->ExecutableSize());
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
       msg.Append('\\');
@@ -726,7 +731,7 @@
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("code-creation,%s,0x%x,%d,\"%s\"\n", tag,
              reinterpret_cast<unsigned int>(code->address()),
-             CodeObjectSize(code), *str);
+             code->ExecutableSize(), *str);
   msg.WriteToLogFile();
 #endif
 }
@@ -743,7 +748,7 @@
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("code-creation,%s,0x%x,%d,\"%s %s:%d\"\n", tag,
              reinterpret_cast<unsigned int>(code->address()),
-             CodeObjectSize(code),
+             code->ExecutableSize(),
              *str, *sourcestr, line);
   msg.WriteToLogFile();
 #endif
@@ -756,7 +761,7 @@
   LogMessageBuilder msg;
   msg.Append("code-creation,%s,0x%x,%d,\"args_count: %d\"\n", tag,
              reinterpret_cast<unsigned int>(code->address()),
-             CodeObjectSize(code),
+             code->ExecutableSize(),
              args_count);
   msg.WriteToLogFile();
 #endif
@@ -932,10 +937,8 @@
   if (overflow) {
     msg.Append(",overflow");
   }
-  if (*(sample->stack)) {
-    for (size_t i = 0; sample->stack[i]; ++i) {
-      msg.Append(",0x%x", reinterpret_cast<unsigned int>(sample->stack[i]));
-    }
+  for (int i = 0; i < sample->frames_count; ++i) {
+    msg.Append(",%p", sample->stack[i]);
   }
   msg.Append('\n');
   msg.WriteToLogFile();
diff --git a/src/log.h b/src/log.h
index aceb282..1066e34 100644
--- a/src/log.h
+++ b/src/log.h
@@ -215,10 +215,6 @@
 
  private:
 
-  // Calculate the size of the code object to report for log events. This takes
-  // the layout of the code object into account.
-  static int CodeObjectSize(Code* code);
-
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
 
@@ -268,6 +264,8 @@
   friend class Profiler;
   friend class SlidingStateWindow;
   friend class VMState;
+#else
+  static bool is_enabled() { return false; }
 #endif
 };
 
@@ -279,6 +277,7 @@
       : low_stack_bound_(low_stack_bound) { }
   void Trace(TickSample* sample);
  private:
+
   unsigned int low_stack_bound_;
 };
 
diff --git a/src/macro-assembler-ia32.cc b/src/macro-assembler-ia32.cc
index 506f890..4fad3be 100644
--- a/src/macro-assembler-ia32.cc
+++ b/src/macro-assembler-ia32.cc
@@ -35,6 +35,9 @@
 
 namespace v8 { namespace internal {
 
+// -------------------------------------------------------------------------
+// MacroAssembler implementation.
+
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
       unresolved_(0),
@@ -111,8 +114,7 @@
   // scratch) OOOOAAAASSSS.
   class ScratchBits: public BitField<uint32_t, 0, 4> {};
   class AddressBits: public BitField<uint32_t, 4, 4> {};
-  class ObjectBits: public BitField<uint32_t, 8, 4> {
-};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
 
   Major MajorKey() { return RecordWrite; }
 
@@ -304,6 +306,20 @@
 }
 
 
+void MacroAssembler::CmpObjectType(Register heap_object,
+                                   InstanceType type,
+                                   Register map) {
+  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+       static_cast<int8_t>(type));
+}
+
+
 void MacroAssembler::FCmp() {
   fcompp();
   push(eax);
@@ -606,6 +622,19 @@
 }
 
 
+void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
+                                      Register result,
+                                      Register op,
+                                      JumpTarget* then_target) {
+  JumpTarget ok(cgen);
+  test(result, Operand(result));
+  ok.Branch(not_zero, taken);
+  test(op, Operand(op));
+  then_target->Branch(sign, not_taken);
+  ok.Bind();
+}
+
+
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
@@ -642,9 +671,7 @@
   j(zero, miss, not_taken);
 
   // Check that the function really is a function.
-  mov(result, FieldOperand(function, HeapObject::kMapOffset));
-  movzx_b(scratch, FieldOperand(result, Map::kInstanceTypeOffset));
-  cmp(scratch, JS_FUNCTION_TYPE);
+  CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss, not_taken);
 
   // Make sure that the function has an instance prototype.
@@ -665,9 +692,7 @@
 
   // If the function does not have an initial map, we're done.
   Label done;
-  mov(scratch, FieldOperand(result, HeapObject::kMapOffset));
-  movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  cmp(scratch, MAP_TYPE);
+  CmpObjectType(result, MAP_TYPE, scratch);
   j(not_equal, &done);
 
   // Get the prototype from the initial map.
@@ -862,7 +887,7 @@
   bool resolved;
   Handle<Code> code = ResolveBuiltin(id, &resolved);
 
-    // Calls are not allowed in some stubs.
+  // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
   // Rely on the assertion to check that the number of provided
diff --git a/src/macro-assembler-ia32.h b/src/macro-assembler-ia32.h
index b389df2..40aa84a 100644
--- a/src/macro-assembler-ia32.h
+++ b/src/macro-assembler-ia32.h
@@ -32,8 +32,11 @@
 
 namespace v8 { namespace internal {
 
+// Forward declaration.
+class JumpTarget;
 
-// Helper type to make boolean flag easier to read at call-site.
+
+// Helper types to make flags easier to read at call sites.
 enum InvokeFlag {
   CALL_FUNCTION,
   JUMP_FUNCTION
@@ -136,6 +139,13 @@
   void Set(Register dst, const Immediate& x);
   void Set(const Operand& dst, const Immediate& x);
 
+  // Compare object type for heap object.
+  // Incoming register is heap_object and outgoing register is map.
+  void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+  // Compare instance type for map.
+  void CmpInstanceType(Register map, InstanceType type);
+
   // FCmp is similar to integer cmp, but requires unsigned
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
@@ -179,6 +189,12 @@
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
+  // Check if result is zero and op is negative in code using jump targets.
+  void NegativeZeroTest(CodeGenerator* cgen,
+                        Register result,
+                        Register op,
+                        JumpTarget* then_target);
+
   // Check if result is zero and any of op1 and op2 are negative.
   // Register scratch is destroyed, and it must be different from op2.
   void NegativeZeroTest(Register result, Register op1, Register op2,
@@ -327,7 +343,6 @@
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/macros.py b/src/macros.py
index b036c63..a3db5f9 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2008 the V8 project authors. All rights reserved.
+# Copyright 2006-2009 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -99,3 +99,22 @@
 # Accessors for original global properties that ensure they have been loaded.
 const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
 const ORIGINAL_DATE   = (global.Date, $Date);
+
+# Constants used on an array to implement the properties of the RegExp object.
+const REGEXP_NUMBER_OF_CAPTURES = 0;
+const REGEXP_FIRST_CAPTURE = 1;
+
+# We can't put macros in macros so we use constants here.
+# REGEXP_NUMBER_OF_CAPTURES
+macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
+
+# Last input and last subject are after the captures so we can omit them on
+# results returned from global searches.  Beware - these evaluate their
+# arguments twice.
+macro LAST_SUBJECT(array) = ((array)[(array)[0] + 1]);
+macro LAST_INPUT(array) = ((array)[(array)[0] + 2]);
+
+# REGEXP_FIRST_CAPTURE
+macro CAPTURE(index) = (1 + (index));
+const CAPTURE0 = 1;
+const CAPTURE1 = 2;
diff --git a/src/messages.js b/src/messages.js
index d5adbc9..c2fc5fc 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -153,7 +153,7 @@
     args = [];
   }
 
-  var e = new constructor();
+  var e = new constructor(kAddMessageAccessorsMarker);
   e.type = type;
   e.arguments = args;
   return e;
@@ -181,7 +181,7 @@
 
 function GetLineNumber(message) {
   if (message.startPos == -1) return -1;
-  var location = message.script.locationFromPosition(message.startPos);
+  var location = message.script.locationFromPosition(message.startPos, true);
   if (location == null) return -1;
   return location.line + 1;
 }
@@ -190,7 +190,7 @@
 // Returns the source code line containing the given source
 // position, or the empty string if the position is invalid.
 function GetSourceLine(message) {
-  var location = message.script.locationFromPosition(message.startPos);
+  var location = message.script.locationFromPosition(message.startPos, true);
   if (location == null) return "";
   location.restrict();
   return location.sourceText();
@@ -230,10 +230,13 @@
 /**
  * Get information on a specific source position.
  * @param {number} position The source position
+ * @param {boolean} include_resource_offset Set to true to have the resource
+ *     offset added to the location
  * @return {SourceLocation}
  *     If line is negative or not in the source null is returned.
  */
-Script.prototype.locationFromPosition = function (position) {
+Script.prototype.locationFromPosition = function (position,
+                                                  include_resource_offset) {
   var lineCount = this.lineCount();
   var line = -1;
   if (position <= this.line_ends[0]) {
@@ -256,9 +259,11 @@
   var column = position - start;
 
   // Adjust according to the offset within the resource.
-  line += this.line_offset;
-  if (line == this.line_offset) {
-    column += this.column_offset;
+  if (include_resource_offset) {
+    line += this.line_offset;
+    if (line == this.line_offset) {
+      column += this.column_offset;
+    }
   }
 
   return new SourceLocation(this, position, line, column, start, end);
@@ -573,7 +578,7 @@
       file = %FunctionGetScript(fun).data;
     }
     if (file) {
-      var location = %FunctionGetScript(fun).locationFromPosition(pos);
+      var location = %FunctionGetScript(fun).locationFromPosition(pos, true);
       if (!isTopLevel) result += "(";
       result += file;
       if (location != null) {
@@ -589,6 +594,29 @@
 // ----------------------------------------------------------------------------
 // Error implementation
 
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
+// Defines accessors for a property that is calculated the first time
+// the property is read and then replaces the accessor with the value.
+// Also, setting the property causes the accessors to be deleted.
+function DefineOneShotAccessor(obj, name, fun) {
+  // Note that the accessors consistently operate on 'obj', not 'this'.
+  // Since the object may occur in someone else's prototype chain we
+  // can't rely on 'this' being the same as 'obj'.
+  obj.__defineGetter__(name, function () {
+    var value = fun(obj);
+    obj[name] = value;
+    return value;
+  });
+  obj.__defineSetter__(name, function (v) {
+    delete obj[name];
+    obj[name] = v;
+  });
+}
+
 function DefineError(f) {
   // Store the error function in both the global object
   // and the runtime object. The function is fetched
@@ -600,14 +628,30 @@
   %SetProperty(global, name, f, DONT_ENUM);
   this['$' + name] = f;
   // Configure the error function.
-  // prototype of 'Error' must be as default: new Object().
-  if (name != 'Error') %FunctionSetPrototype(f, new $Error());
+  if (name == 'Error') {
+    // The prototype of the Error object must itself be an error.
+    // However, it can't be an instance of the Error object because
+    // it hasn't been properly configured yet.  Instead we create a
+    // special not-a-true-error-but-close-enough object.
+    function ErrorPrototype() {}
+    %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
+    %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
+    %FunctionSetPrototype(f, new ErrorPrototype());
+  } else {
+    %FunctionSetPrototype(f, new $Error());
+  }
   %FunctionSetInstanceClassName(f, 'Error');
   %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
   f.prototype.name = name;
   %SetCode(f, function(m) {
     if (%IsConstructCall()) {
-      if (!IS_UNDEFINED(m)) this.message = ToString(m);
+      if (m === kAddMessageAccessorsMarker) {
+        DefineOneShotAccessor(this, 'message', function (obj) {
+          return FormatMessage({type: obj.type, args: obj.arguments});
+        });
+      } else if (!IS_UNDEFINED(m)) {
+        this.message = ToString(m);
+      }
     } else {
       return new f(m);
     }
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index 2f413fa..d5da445 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -1359,7 +1359,8 @@
 
 FrameMirror.prototype.sourceLocation = function() {
   if (this.func().resolved() && this.func().script()) {
-    return this.func().script().locationFromPosition(this.sourcePosition());
+    return this.func().script().locationFromPosition(this.sourcePosition(),
+                                                     true);
   }
 };
 
@@ -1561,6 +1562,11 @@
 };
 
 
+ScriptMirror.prototype.id = function() {
+  return this.script_.id;
+};
+
+
 ScriptMirror.prototype.source = function() {
   return this.script_.source;
 };
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0362a15..5b1e0b3 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -658,7 +658,7 @@
 void Code::CodePrint() {
   HeapObject::PrintHeader("Code");
 #ifdef ENABLE_DISASSEMBLER
-  Disassemble();
+  Disassemble(NULL);
 #endif
 }
 
@@ -696,9 +696,20 @@
       break;
     }
     case JSRegExp::IRREGEXP: {
+      bool is_native = FLAG_regexp_native;
+#ifdef ARM
+      // No native regexp on arm yet.
+      is_native = false;
+#endif
       FixedArray* arr = FixedArray::cast(data());
-      Object* irregexp_data = arr->get(JSRegExp::kIrregexpDataIndex);
-      ASSERT(irregexp_data->IsFixedArray());
+      Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
+      ASSERT(ascii_data->IsTheHole()
+          || (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+      Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
+      ASSERT(uc16_data->IsTheHole()
+          || (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+      ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
+      ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
       break;
     }
     default:
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 9705b75..9aee342 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2041,6 +2041,7 @@
 
 ACCESSORS(Script, source, Object, kSourceOffset)
 ACCESSORS(Script, name, Object, kNameOffset)
+ACCESSORS(Script, id, Object, kIdOffset)
 ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
 ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
 ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
@@ -2337,6 +2338,13 @@
 }
 
 
+void JSRegExp::SetDataAt(int index, Object* value) {
+  ASSERT(TypeTag() != NOT_COMPILED);
+  ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
+  FixedArray::cast(data())->set(index, value);
+}
+
+
 bool JSObject::HasFastElements() {
   return !elements()->IsDictionary();
 }
diff --git a/src/objects.cc b/src/objects.cc
index c4b49ce..b8228f0 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2391,7 +2391,7 @@
 int Map::NumberOfDescribedProperties() {
   int result = 0;
   for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (!r.IsTransition()) result++;
+    if (r.IsProperty()) result++;
   }
   return result;
 }
@@ -2399,7 +2399,7 @@
 
 int Map::PropertyIndexFor(String* name) {
   for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (r.Equals(name)) return r.GetFieldIndex();
+    if (r.Equals(name) && !r.IsNullDescriptor()) return r.GetFieldIndex();
   }
   return -1;
 }
@@ -2933,6 +2933,7 @@
   int new_size = number_of_descriptors() - transitions - null_descriptors;
 
   // If key is in descriptor, we replace it in-place when filtering.
+  // Count a null descriptor for key as inserted, not replaced.
   int index = Search(descriptor->GetKey());
   const bool inserting = (index == kNotFound);
   const bool replacing = !inserting;
@@ -2949,9 +2950,9 @@
         t == CALLBACKS ||
         t == INTERCEPTOR) {
       keep_enumeration_index = true;
-    } else if (t == NULL_DESCRIPTOR || remove_transitions) {
-     // Replaced descriptor has been counted as removed if it is null
-     // or a transition that will be replaced.  Adjust count in this case.
+    } else if (remove_transitions) {
+     // Replaced descriptor has been counted as removed if it is
+     // a transition that will be replaced.  Adjust count in this case.
       ++new_size;
     }
   }
@@ -2990,7 +2991,9 @@
     ASSERT(r.GetKey() == descriptor->GetKey());
     r.advance();
   } else {
-    ASSERT(r.eos() || r.GetKey()->Hash() > descriptor_hash);
+    ASSERT(r.eos() ||
+           r.GetKey()->Hash() > descriptor_hash ||
+           r.IsNullDescriptor());
   }
   for (; !r.eos(); r.advance()) {
     if (r.IsNullDescriptor()) continue;
@@ -3004,24 +3007,25 @@
 
 
 Object* DescriptorArray::RemoveTransitions() {
-  // Remove all transitions.  Return a copy of the array with all transitions
-  // removed, or a Failure object if the new array could not be allocated.
+  // Remove all transitions and null descriptors. Return a copy of the array
+  // with all transitions removed, or a Failure object if the new array could
+  // not be allocated.
 
   // Compute the size of the map transition entries to be removed.
-  int count_transitions = 0;
+  int num_removed = 0;
   for (DescriptorReader r(this); !r.eos(); r.advance()) {
-    if (r.IsTransition()) count_transitions++;
+    if (!r.IsProperty()) num_removed++;
   }
 
   // Allocate the new descriptor array.
-  Object* result = Allocate(number_of_descriptors() - count_transitions);
+  Object* result = Allocate(number_of_descriptors() - num_removed);
   if (result->IsFailure()) return result;
   DescriptorArray* new_descriptors = DescriptorArray::cast(result);
 
   // Copy the content.
   DescriptorWriter w(new_descriptors);
   for (DescriptorReader r(this); !r.eos(); r.advance()) {
-    if (!r.IsTransition()) w.WriteFrom(&r);
+    if (r.IsProperty()) w.WriteFrom(&r);
   }
   ASSERT(w.eos());
 
@@ -3097,10 +3101,10 @@
     ASSERT(hash == mid_hash);
     // There might be more, so we find the first one and
     // check them all to see if we have a match.
-    if (name == mid_name) return mid;
+    if (name == mid_name  && !is_null_descriptor(mid)) return mid;
     while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
     for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
-      if (GetKey(mid)->Equals(name)) return mid;
+      if (GetKey(mid)->Equals(name) && !is_null_descriptor(mid)) return mid;
     }
     break;
   }
@@ -3110,7 +3114,9 @@
 
 int DescriptorArray::LinearSearch(String* name, int len) {
   for (int number = 0; number < len; number++) {
-    if (name->Equals(GetKey(number))) return number;
+    if (name->Equals(GetKey(number)) && !is_null_descriptor(number)) {
+      return number;
+    }
   }
   return kNotFound;
 }
@@ -4795,10 +4801,13 @@
 }
 
 
-void Code::Disassemble() {
-  PrintF("kind = %s", Kind2String(kind()));
+void Code::Disassemble(const char* name) {
+  PrintF("kind = %s\n", Kind2String(kind()));
+  if ((name != NULL) && (name[0] != '\0')) {
+    PrintF("name = %s\n", name);
+  }
 
-  PrintF("\nInstructions (size = %d)\n", instruction_size());
+  PrintF("Instructions (size = %d)\n", instruction_size());
   Disassembler::Decode(NULL, this);
   PrintF("\n");
 
@@ -4879,6 +4888,22 @@
 }
 
 
+void JSArray::EnsureSize(int required_size) {
+  Handle<JSArray> self(this);
+  ASSERT(HasFastElements());
+  if (elements()->length() >= required_size) return;
+  Handle<FixedArray> old_backing(elements());
+  int old_size = old_backing->length();
+  // Doubling in size would be overkill, but leave some slack to avoid
+  // constantly growing.
+  int new_size = required_size + (required_size >> 3);
+  Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
+  // Can't use 'this' any more because the allocation above may have caused a GC!
+  for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
+  self->SetContent(*new_backing);
+}
+
+
 // Computes the new capacity when expanding the elements of a JSObject.
 static int NewElementsCapacity(int old_capacity) {
   // (old_capacity + 50%) + 16
@@ -5640,7 +5665,8 @@
          !r.eos();
          r.advance()) {
       PropertyDetails details = r.GetDetails();
-      if (!details.IsTransition() && (details.attributes() & filter) == 0) {
+      if (details.IsProperty() &&
+          (details.attributes() & filter) == 0) {
         result++;
       }
     }
@@ -5782,7 +5808,7 @@
     for (DescriptorReader r(map()->instance_descriptors());
          !r.eos();
          r.advance()) {
-      if (!r.IsTransition()) {
+      if (r.IsProperty()) {
         storage->set(index++, r.GetKey());
       }
     }
@@ -6949,76 +6975,6 @@
 }
 
 
-// Init line_ends array with code positions of line ends inside script source
-void Script::InitLineEnds() {
-  if (!line_ends()->IsUndefined()) return;
-
-  if (!source()->IsString()) {
-    ASSERT(source()->IsUndefined());
-    set_line_ends(*(Factory::NewJSArray(0)));
-    ASSERT(line_ends()->IsJSArray());
-    return;
-  }
-
-  Handle<String> src(String::cast(source()));
-  const int src_len = src->length();
-  Handle<String> new_line = Factory::NewStringFromAscii(CStrVector("\n"));
-
-  // Pass 1: Identify line count
-  int line_count = 0;
-  int position = 0;
-  while (position != -1 && position < src_len) {
-    position = Runtime::StringMatch(src, new_line, position);
-    if (position != -1) {
-      position++;
-    }
-    // Even if the last line misses a line end, it is counted
-    line_count++;
-  }
-
-  // Pass 2: Fill in line ends positions
-  Handle<FixedArray> array = Factory::NewFixedArray(line_count);
-  int array_index = 0;
-  position = 0;
-  while (position != -1 && position < src_len) {
-    position = Runtime::StringMatch(src, new_line, position);
-    // If the script does not end with a line ending add the final end position
-    // as just past the last line ending.
-    array->set(array_index++,
-               Smi::FromInt(position != -1 ? position++ : src_len));
-  }
-  ASSERT(array_index == line_count);
-
-  Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
-  set_line_ends(*object);
-  ASSERT(line_ends()->IsJSArray());
-}
-
-
-// Convert code position into line number
-int Script::GetLineNumber(int code_pos) {
-  InitLineEnds();
-  JSArray* line_ends_array = JSArray::cast(line_ends());
-  const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
-
-  int line = -1;
-  if (line_ends_len > 0 &&
-      code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
-    line = 0;
-  } else {
-    for (int i = 1; i < line_ends_len; ++i) {
-      if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
-          code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
-        line = i;
-        break;
-      }
-    }
-  }
-
-  return line != -1 ? line + line_offset()->value() : line;
-}
-
-
 // Check if there is a break point at this code position.
 bool DebugInfo::HasBreakPoint(int code_position) {
   // Get the break point info object for this code position.
diff --git a/src/objects.h b/src/objects.h
index 71a9ff5..28df02a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -144,6 +144,10 @@
     return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
   }
 
+  bool IsProperty() {
+    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
+  }
+
   PropertyAttributes attributes() { return AttributesField::decode(value_); }
 
   int index() { return IndexField::decode(value_); }
@@ -1433,6 +1437,7 @@
 
   static const uint32_t kMaxGap = 1024;
   static const int kMaxFastElementsLength = 5000;
+  static const int kInitialMaxFastElementArray = 100000;
   static const int kMaxFastProperties = 8;
   static const int kMaxInstanceSize = 255 * kPointerSize;
   // When extending the backing storage for property values, we increase
@@ -1717,6 +1722,10 @@
     return( descriptor_number << 1) + 1;
   }
 
+  bool is_null_descriptor(int descriptor_number) {
+    return PropertyDetails(GetDetails(descriptor_number)).type() ==
+        NULL_DESCRIPTOR;
+  }
   // Swap operation on FixedArray without using write barriers.
   static inline void fast_swap(FixedArray* array, int first, int second);
 
@@ -2157,7 +2166,7 @@
   // Printing
   static const char* Kind2String(Kind kind);
   static const char* ICState2String(InlineCacheState state);
-  void Disassemble();
+  void Disassemble(const char* name);
 #endif  // ENABLE_DISASSEMBLER
 
   // [instruction_size]: Size of the native instructions
@@ -2258,6 +2267,16 @@
     return RoundUp(kHeaderSize + body_size + sinfo_size, kCodeAlignment);
   }
 
+  // Calculate the size of the code object to report for log events. This takes
+  // the layout of the code object into account.
+  int ExecutableSize() {
+    // Check that the assumptions about the layout of the code object holds.
+    ASSERT_EQ(reinterpret_cast<unsigned int>(instruction_start()) -
+              reinterpret_cast<unsigned int>(address()),
+              Code::kHeaderSize);
+    return instruction_size() + Code::kHeaderSize;
+  }
+
   // Locating source position.
   int SourcePosition(Address pc);
   int SourceStatementPosition(Address pc);
@@ -2531,6 +2550,9 @@
   // [name]: the script name.
   DECL_ACCESSORS(name, Object)
 
+  // [id]: the script id.
+  DECL_ACCESSORS(id, Object)
+
   // [line_offset]: script line offset in resource from where it was extracted.
   DECL_ACCESSORS(line_offset, Smi)
 
@@ -2554,9 +2576,6 @@
   void ScriptVerify();
 #endif
 
-  void InitLineEnds();
-  int GetLineNumber(int code_position);
-
   static const int kSourceOffset = HeapObject::kHeaderSize;
   static const int kNameOffset = kSourceOffset + kPointerSize;
   static const int kLineOffsetOffset = kNameOffset + kPointerSize;
@@ -2564,7 +2583,8 @@
   static const int kWrapperOffset = kColumnOffsetOffset + kPointerSize;
   static const int kTypeOffset = kWrapperOffset + kPointerSize;
   static const int kLineEndsOffset = kTypeOffset + kPointerSize;
-  static const int kSize = kLineEndsOffset + kPointerSize;
+  static const int kIdOffset = kLineEndsOffset + kPointerSize;
+  static const int kSize = kIdOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Script);
@@ -2946,6 +2966,19 @@
 };
 
 // Regular expressions
+// The regular expression holds a single reference to a FixedArray in
+// the kDataOffset field.
+// The FixedArray contains the following data:
+// - tag : type of regexp implementation (not compiled yet, atom or irregexp)
+// - reference to the original source string
+// - reference to the original flag string
+// If it is an atom regexp:
+// - a reference to a literal string to search for
+// If it is an irregexp regexp:
+// - a reference to code for ASCII inputs (bytecode or compiled).
+// - a reference to code for UC16 inputs (bytecode or compiled).
+// - max number of registers used by irregexp implementations.
+// - number of capture registers (output values) of the regexp.
 class JSRegExp: public JSObject {
  public:
   // Meaning of Type:
@@ -2973,6 +3006,8 @@
   inline Flags GetFlags();
   inline String* Pattern();
   inline Object* DataAt(int index);
+  // Set implementation data after the object has been prepared.
+  inline void SetDataAt(int index, Object* value);
 
   static inline JSRegExp* cast(Object* obj);
 
@@ -2984,14 +3019,29 @@
   static const int kDataOffset = JSObject::kHeaderSize;
   static const int kSize = kDataOffset + kIntSize;
 
+  // Indices in the data array.
   static const int kTagIndex = 0;
   static const int kSourceIndex = kTagIndex + 1;
   static const int kFlagsIndex = kSourceIndex + 1;
-  // These two are the same since the same entry is shared for
-  // different purposes in different types of regexps.
-  static const int kAtomPatternIndex = kFlagsIndex + 1;
-  static const int kIrregexpDataIndex = kFlagsIndex + 1;
-  static const int kDataSize = kAtomPatternIndex + 1;
+  static const int kDataIndex = kFlagsIndex + 1;
+  // The data fields are used in different ways depending on the
+  // value of the tag.
+  // Atom regexps (literal strings).
+  static const int kAtomPatternIndex = kDataIndex;
+
+  static const int kAtomDataSize = kAtomPatternIndex + 1;
+
+  // Irregexp compiled code or bytecode for ASCII.
+  static const int kIrregexpASCIICodeIndex = kDataIndex;
+  // Irregexp compiled code or bytecode for UC16.
+  static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+  // Maximal number of registers used by either ASCII or UC16.
+  // Only used to check that there is enough stack space
+  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+  // Number of captures in the compiled regexp.
+  static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+
+  static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
 };
 
 
@@ -3786,6 +3836,10 @@
   // Casting.
   static inline JSArray* cast(Object* obj);
 
+  // Uses handles.  Ensures that the fixed array backing the JSArray has at
+  // least the stated size.
+  void EnsureSize(int minimum_size_of_backing_fixed_array);
+
   // Dispatched behavior.
 #ifdef DEBUG
   void JSArrayPrint();
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
new file mode 100644
index 0000000..e9f7d3e
--- /dev/null
+++ b/src/oprofile-agent.cc
@@ -0,0 +1,111 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "oprofile-agent.h"
+
+namespace v8 { namespace internal {
+
+#ifdef ENABLE_OPROFILE_AGENT
+op_agent_t OProfileAgent::handle_ = NULL;
+#endif
+
+
+bool OProfileAgent::Initialize() {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (FLAG_oprofile) {
+    if (handle_ != NULL) return false;
+
+    // Disable code moving by GC.
+    FLAG_always_compact = false;
+    FLAG_never_compact = true;
+
+    handle_ = op_open_agent();
+    return (handle_ != NULL);
+  } else {
+    return true;
+  }
+#else
+  return true;
+#endif
+}
+
+
+void OProfileAgent::TearDown() {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    op_close_agent(handle_);
+  }
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(const char* name,
+    const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ == NULL) return;
+  op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(String* name,
+    const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    const char* func_name;
+    SmartPointer<char> str =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    func_name = name->length() > 0 ? *str : "<anonymous>";
+    CreateNativeCodeRegion(func_name, ptr, size);
+  }
+#endif
+}
+
+
+void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
+    int line_num, const void* ptr, unsigned int size) {
+#ifdef ENABLE_OPROFILE_AGENT
+  if (handle_ != NULL) {
+    Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
+    const char* func_name;
+    SmartPointer<char> str =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    func_name = name->length() > 0 ? *str : "<anonymous>";
+    SmartPointer<char> source_str =
+        source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
+                                   func_name, *source_str, line_num) != -1) {
+      CreateNativeCodeRegion(buf.start(), ptr, size);
+    } else {
+      CreateNativeCodeRegion("<script/func name too long>", ptr, size);
+    }
+  }
+#endif
+}
+} }
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
new file mode 100644
index 0000000..75cfe18
--- /dev/null
+++ b/src/oprofile-agent.h
@@ -0,0 +1,68 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OPROFILE_AGENT_H_
+#define V8_OPROFILE_AGENT_H_
+
+#include <stdlib.h>
+
+#include "globals.h"
+
+#ifdef ENABLE_OPROFILE_AGENT
+// opagent.h uses uint64_t type, which can be missing in
+// system headers (they have __uint64_t), but is defined
+// in V8's headers.
+#include <opagent.h>  // NOLINT
+#endif
+
+namespace v8 { namespace internal {
+
+class OProfileAgent {
+ public:
+  static bool Initialize();
+  static void TearDown();
+  static void CreateNativeCodeRegion(const char* name,
+                                     const void* ptr, unsigned int size);
+  static void CreateNativeCodeRegion(String* name,
+                                     const void* ptr, unsigned int size);
+  static void CreateNativeCodeRegion(String* name, String* source, int line_num,
+                                     const void* ptr, unsigned int size);
+#ifdef ENABLE_OPROFILE_AGENT
+  static bool is_enabled() { return handle_ != NULL; }
+
+ private:
+  static op_agent_t handle_;
+
+  // Size of the buffer that is used for composing code area names.
+  static const int kFormattingBufSize = 256;
+#else
+  static bool is_enabled() { return false; }
+#endif
+};
+} }
+
+#endif  // V8_OPROFILE_AGENT_H_
diff --git a/src/parser.cc b/src/parser.cc
index 6d46878..5487bf3 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -158,10 +158,12 @@
 
   // Decide if a property should be the object boilerplate.
   bool IsBoilerplateProperty(ObjectLiteral::Property* property);
-  // If the property is CONSTANT type, it returns the literal value,
-  // otherwise, it return undefined literal as the placeholder
+  // If the expression is a literal, return the literal value;
+  // if the expression is a materialized literal and is simple return a
+  // compile time value as encoded by CompileTimeValue::GetValue().
+  // Otherwise, return undefined literal as the placeholder
   // in the object literal boilerplate.
-  Literal* GetBoilerplateValue(ObjectLiteral::Property* property);
+  Handle<Object> GetBoilerplateValue(Expression* expression);
 
   enum FunctionLiteralType {
     EXPRESSION,
@@ -205,7 +207,7 @@
   BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
   IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
 
-  void RegisterLabelUse(Label* label, int index);
+  void RegisterTargetUse(BreakTarget* target, int index);
 
   // Create a number literal.
   Literal* NewNumberLiteral(double value);
@@ -2050,8 +2052,8 @@
                           bool is_catch_block,
                           bool* ok) {
   // Parse the statement and collect escaping labels.
-  ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
-  LabelCollector collector(label_list);
+  ZoneList<BreakTarget*>* target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector collector(target_list);
   Statement* stat;
   { Target target(this, &collector);
     with_nesting_level_++;
@@ -2064,7 +2066,7 @@
   // 2: The try-finally block evaluating the body.
   Block* result = NEW(Block(NULL, 2, false));
 
-  if (result) {
+  if (result != NULL) {
     result->AddStatement(NEW(WithEnterStatement(obj, is_catch_block)));
 
     // Create body block.
@@ -2077,12 +2079,10 @@
 
     // Return a try-finally statement.
     TryFinally* wrapper = NEW(TryFinally(body, exit));
-    wrapper->set_escaping_labels(collector.labels());
+    wrapper->set_escaping_targets(collector.targets());
     result->AddStatement(wrapper);
-    return result;
-  } else {
-    return NULL;
   }
+  return result;
 }
 
 
@@ -2197,8 +2197,8 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
-  LabelCollector collector(label_list);
+  ZoneList<BreakTarget*>* target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector collector(target_list);
   Block* try_block;
 
   { Target target(this, &collector);
@@ -2217,10 +2217,11 @@
   }
 
   // If we can break out from the catch block and there is a finally block,
-  // then we will need to collect labels from the catch block. Since we don't
-  // know yet if there will be a finally block, we always collect the labels.
-  ZoneList<Label*>* catch_label_list = NEW(ZoneList<Label*>(0));
-  LabelCollector catch_collector(catch_label_list);
+  // then we will need to collect jump targets from the catch block. Since
+  // we don't know yet if there will be a finally block, we always collect
+  // the jump targets.
+  ZoneList<BreakTarget*>* catch_target_list = NEW(ZoneList<BreakTarget*>(0));
+  TargetCollector catch_collector(catch_target_list);
   bool has_catch = false;
   if (tok == Token::CATCH) {
     has_catch = true;
@@ -2260,7 +2261,7 @@
 
   if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
     TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
-    statement->set_escaping_labels(collector.labels());
+    statement->set_escaping_targets(collector.targets());
     try_block = NEW(Block(NULL, 1, false));
     try_block->AddStatement(statement);
     catch_block = NULL;
@@ -2271,15 +2272,15 @@
     if (catch_block != NULL) {
       ASSERT(finally_block == NULL);
       result = NEW(TryCatch(try_block, catch_var, catch_block));
-      result->set_escaping_labels(collector.labels());
+      result->set_escaping_targets(collector.targets());
     } else {
       ASSERT(finally_block != NULL);
       result = NEW(TryFinally(try_block, finally_block));
-      // Add the labels of the try block and the catch block.
-      for (int i = 0; i < collector.labels()->length(); i++) {
-        catch_collector.labels()->Add(collector.labels()->at(i));
+      // Add the jump targets of the try block and the catch block.
+      for (int i = 0; i < collector.targets()->length(); i++) {
+        catch_collector.targets()->Add(collector.targets()->at(i));
       }
-      result->set_escaping_labels(catch_collector.labels());
+      result->set_escaping_targets(catch_collector.targets());
     }
   }
 
@@ -3057,6 +3058,7 @@
 
   // Update the scope information before the pre-parsing bailout.
   temp_scope_->set_contains_array_literal();
+  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
 
   if (is_pre_parsing_) return NULL;
 
@@ -3065,16 +3067,19 @@
       Factory::NewFixedArray(values.length(), TENURED);
 
   // Fill in the literals.
+  bool is_simple = true;
   for (int i = 0; i < values.length(); i++) {
-    Literal* literal = values.at(i)->AsLiteral();
-    if (literal == NULL) {
+    Handle<Object> boilerplate_value = GetBoilerplateValue(values.at(i));
+    if (boilerplate_value->IsUndefined()) {
       literals->set_the_hole(i);
+      is_simple = false;
     } else {
-      literals->set(i, *literal->handle());
+      literals->set(i, *boilerplate_value);
     }
   }
 
-  return NEW(ArrayLiteral(literals, values.elements()));
+  return NEW(ArrayLiteral(literals, values.elements(),
+                          literal_index, is_simple));
 }
 
 
@@ -3084,10 +3089,48 @@
 }
 
 
-Literal* Parser::GetBoilerplateValue(ObjectLiteral::Property* property) {
-  if (property->kind() == ObjectLiteral::Property::CONSTANT)
-    return property->value()->AsLiteral();
-  return GetLiteralUndefined();
+bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+  MaterializedLiteral* lit = expression->AsMaterializedLiteral();
+  return lit != NULL && lit->is_simple();
+}
+
+Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
+  ASSERT(IsCompileTimeValue(expression));
+  Handle<FixedArray> result = Factory::NewFixedArray(2, TENURED);
+  ObjectLiteral* object_literal = expression->AsObjectLiteral();
+  if (object_literal != NULL) {
+    ASSERT(object_literal->is_simple());
+    result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL));
+    result->set(kElementsSlot, *object_literal->constant_properties());
+  } else {
+    ArrayLiteral* array_literal = expression->AsArrayLiteral();
+    ASSERT(array_literal != NULL && array_literal->is_simple());
+    result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+    result->set(kElementsSlot, *array_literal->literals());
+  }
+  return result;
+}
+
+
+CompileTimeValue::Type CompileTimeValue::GetType(Handle<FixedArray> value) {
+  Smi* type_value = Smi::cast(value->get(kTypeSlot));
+  return static_cast<Type>(type_value->value());
+}
+
+
+Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+}
+
+
+Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
+  if (expression->AsLiteral() != NULL) {
+    return expression->AsLiteral()->handle();
+  }
+  if (CompileTimeValue::IsCompileTimeValue(expression)) {
+    return CompileTimeValue::GetValue(expression);
+  }
+  return Factory::undefined_value();
 }
 
 
@@ -3182,24 +3225,30 @@
   Handle<FixedArray> constant_properties =
       Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
   int position = 0;
+  bool is_simple = true;
   for (int i = 0; i < properties.length(); i++) {
     ObjectLiteral::Property* property = properties.at(i);
-    if (!IsBoilerplateProperty(property)) continue;
+    if (!IsBoilerplateProperty(property)) {
+      is_simple = false;
+      continue;
+    }
 
     // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
     // value for COMPUTED properties, the real value is filled in at
     // runtime. The enumeration order is maintained.
     Handle<Object> key = property->key()->handle();
-    Literal* literal = GetBoilerplateValue(property);
+    Handle<Object> value = GetBoilerplateValue(property->value());
+    is_simple = is_simple && !value->IsUndefined();
 
     // Add name, value pair to the fixed array.
     constant_properties->set(position++, *key);
-    constant_properties->set(position++, *literal->handle());
+    constant_properties->set(position++, *value);
   }
 
   return new ObjectLiteral(constant_properties,
                            properties.elements(),
-                           literal_index);
+                           literal_index,
+                           is_simple);
 }
 
 
@@ -3506,7 +3555,7 @@
 
     if ((anonymous && stat->is_target_for_anonymous()) ||
         (!anonymous && ContainsLabel(stat->labels(), label))) {
-      RegisterLabelUse(stat->break_target(), i);
+      RegisterTargetUse(stat->break_target(), i);
       return stat;
     }
   }
@@ -3523,7 +3572,7 @@
 
     ASSERT(stat->is_target_for_anonymous());
     if (anonymous || ContainsLabel(stat->labels(), label)) {
-      RegisterLabelUse(stat->continue_target(), i);
+      RegisterTargetUse(stat->continue_target(), i);
       return stat;
     }
   }
@@ -3531,13 +3580,13 @@
 }
 
 
-void Parser::RegisterLabelUse(Label* label, int index) {
-  // Register that a label found at the given index in the target
-  // stack has been used from the top of the target stack. Add the
-  // label to any LabelCollectors passed on the stack.
+void Parser::RegisterTargetUse(BreakTarget* target, int index) {
+  // Register that a break target found at the given index in the
+  // target stack has been used from the top of the target stack. Add
+  // the break target to any TargetCollectors passed on the stack.
   for (int i = target_stack_->length(); i-- > index;) {
-    LabelCollector* collector = target_stack_->at(i)->AsLabelCollector();
-    if (collector != NULL) collector->AddLabel(label);
+    TargetCollector* collector = target_stack_->at(i)->AsTargetCollector();
+    if (collector != NULL) collector->AddTarget(target);
   }
 }
 
@@ -4128,10 +4177,8 @@
 STATIC_CHECK(('a' ^ 'A') == 0x20);
 
 uc32 RegExpParser::ParseControlLetterEscape() {
-  if (!has_more()) {
-    ReportError(CStrVector("\\c at end of pattern"));
-    return '\0';
-  }
+  if (!has_more())
+    return 'c';
   uc32 letter = current() & ~(0x20);  // Collapse upper and lower case letters.
   if (letter < 'A' || 'Z' < letter) {
     // Non-spec error-correction: "\c" followed by non-control letter is
diff --git a/src/parser.h b/src/parser.h
index ee303b4..4c1401c 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -29,6 +29,7 @@
 #define V8_PARSER_H_
 
 #include "scanner.h"
+#include "allocation.h"
 
 namespace v8 { namespace internal {
 
@@ -165,6 +166,35 @@
                              int end_position,
                              bool is_expression);
 
+
+// Support for handling complex values (array and object literals) that
+// can be fully handled at compile time.
+class CompileTimeValue: public AllStatic {
+ public:
+  enum Type {
+    OBJECT_LITERAL,
+    ARRAY_LITERAL
+  };
+
+  static bool IsCompileTimeValue(Expression* expression);
+
+  // Get the value as a compile time value.
+  static Handle<FixedArray> GetValue(Expression* expression);
+
+  // Get the type of a compile time value returned by GetValue().
+  static Type GetType(Handle<FixedArray> value);
+
+  // Get the elements array of a compile time value returned by GetValue().
+  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+
+ private:
+  static const int kTypeSlot = 0;
+  static const int kElementsSlot = 1;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_PARSER_H_
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index fb2fba6..b1e0b73 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -656,7 +656,6 @@
 
   // Data Transimission
   int Send(const char* data, int len) const;
-  bool SendAll(const char* data, int len) const;
   int Receive(char* data, int len) const;
 
   bool IsValid() const { return socket_ != -1; }
@@ -736,19 +735,6 @@
 }
 
 
-bool FreeBSDSocket::SendAll(const char* data, int len) const {
-  int sent_len = 0;
-  while (sent_len < len) {
-    int status = Send(data, len);
-    if (status <= 0) {
-      return false;
-    }
-    sent_len += status;
-  }
-  return true;
-}
-
-
 int FreeBSDSocket::Receive(char* data, int len) const {
   int status = recv(socket_, data, len, 0);
   return status;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index c001f51..09eee2e 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -640,7 +640,6 @@
 
   // Data Transimission
   int Send(const char* data, int len) const;
-  bool SendAll(const char* data, int len) const;
   int Receive(char* data, int len) const;
 
   bool IsValid() const { return socket_ != -1; }
@@ -720,19 +719,6 @@
 }
 
 
-bool LinuxSocket::SendAll(const char* data, int len) const {
-  int sent_len = 0;
-  while (sent_len < len) {
-    int status = Send(data, len);
-    if (status <= 0) {
-      return false;
-    }
-    sent_len += status;
-  }
-  return true;
-}
-
-
 int LinuxSocket::Receive(char* data, int len) const {
   int status = recv(socket_, data, len, 0);
   return status;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 9bbdcfc..4892e2a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -607,7 +607,6 @@
 
   // Data Transimission
   int Send(const char* data, int len) const;
-  bool SendAll(const char* data, int len) const;
   int Receive(char* data, int len) const;
 
   bool IsValid() const { return socket_ != -1; }
@@ -693,19 +692,6 @@
 }
 
 
-bool MacOSSocket::SendAll(const char* data, int len) const {
-  int sent_len = 0;
-  while (sent_len < len) {
-    int status = Send(data, len);
-    if (status <= 0) {
-      return false;
-    }
-    sent_len += status;
-  }
-  return true;
-}
-
-
 int MacOSSocket::Receive(char* data, int len) const {
   int status = recv(socket_, data, len, 0);
   return status;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index cc725cb..fd65738 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1583,7 +1583,6 @@
 
   // Data Transimission
   int Send(const char* data, int len) const;
-  bool SendAll(const char* data, int len) const;
   int Receive(char* data, int len) const;
 
   bool IsValid() const { return socket_ != INVALID_SOCKET; }
@@ -1663,19 +1662,6 @@
 }
 
 
-bool Win32Socket::SendAll(const char* data, int len) const {
-  int sent_len = 0;
-  while (sent_len < len) {
-    int status = Send(data, len);
-    if (status <= 0) {
-      return false;
-    }
-    sent_len += status;
-  }
-  return true;
-}
-
-
 int Win32Socket::Receive(char* data, int len) const {
   int status = recv(socket_, data, len, 0);
   return status;
diff --git a/src/platform.h b/src/platform.h
index dbd6384..3293708 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -437,7 +437,6 @@
 
   // Data Transimission
   virtual int Send(const char* data, int len) const = 0;
-  virtual bool SendAll(const char* data, int len) const = 0;
   virtual int Receive(char* data, int len) const = 0;
 
   virtual bool IsValid() const = 0;
@@ -467,24 +466,9 @@
   unsigned int sp;  // Stack pointer.
   unsigned int fp;  // Frame pointer.
   StateTag state;   // The state of the VM.
-  SmartPointer<Address> stack;  // Call stack, null-terminated.
-
-  inline TickSample& operator=(const TickSample& rhs) {
-    if (this == &rhs) return *this;
-    pc = rhs.pc;
-    sp = rhs.sp;
-    fp = rhs.fp;
-    state = rhs.state;
-    DeleteArray(stack.Detach());
-    stack = rhs.stack;
-    return *this;
-  }
-
-  inline void InitStack(int depth) {
-    stack = SmartPointer<Address>(NewArray<Address>(depth + 1));
-    // null-terminate
-    stack[depth] = 0;
-  }
+  static const int kMaxFramesCount = 5;
+  EmbeddedVector<Address, kMaxFramesCount> stack;  // Call stack.
+  int frames_count;  // Number of captured frames.
 };
 
 class Sampler {
diff --git a/src/property.h b/src/property.h
index 914b8dd..65d4a0d 100644
--- a/src/property.h
+++ b/src/property.h
@@ -356,6 +356,10 @@
     return type() == NULL_DESCRIPTOR;
   }
 
+  bool IsProperty() {
+    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
+  }
+
   JSFunction* GetConstantFunction() { return JSFunction::cast(GetValue()); }
 
   AccessorDescriptor* GetCallbacks() {
diff --git a/src/regexp-delay.js b/src/regexp-delay.js
index f1ded9d..7994875 100644
--- a/src/regexp-delay.js
+++ b/src/regexp-delay.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,7 +52,7 @@
   var multiline = false;
 
   for (var i = 0; i < flags.length; i++) {
-    var c = flags.charAt(i);
+    var c = StringCharAt.call(flags, i);
     switch (c) {
       case 'g':
         // Allow duplicate flags to be consistent with JSC and others.
@@ -117,15 +117,15 @@
 
 // Deprecated RegExp.prototype.compile method.  We behave like the constructor
 // were called again.  In SpiderMonkey, this method returns the regexp object.
-// In KJS, it returns undefined.  For compatibility with KJS, we match their
+// In JSC, it returns undefined.  For compatibility with JSC, we match their
 // behavior.
 function CompileRegExp(pattern, flags) {
-  // Both KJS and SpiderMonkey treat a missing pattern argument as the
+  // Both JSC and SpiderMonkey treat a missing pattern argument as the
   // empty subject string, and an actual undefined value passed as the
-  // patter as the string 'undefined'.  Note that KJS is inconsistent
+  // pattern as the string 'undefined'.  Note that JSC is inconsistent
   // here, treating undefined values differently in
   // RegExp.prototype.compile and in the constructor, where they are
-  // the empty string.  For compatibility with KJS, we match their
+  // the empty string.  For compatibility with JSC, we match their
   // behavior.
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
     DoConstructRegExp(this, 'undefined', flags, false);
@@ -135,32 +135,24 @@
 }
 
 
-// DoRegExpExec and DoRegExpExecGlobal are wrappers around the runtime
-// %RegExp and %RegExpGlobal functions that ensure that the static
-// properties of the RegExp constructor are set.
 function DoRegExpExec(regexp, string, index) {
-  var matchIndices = %RegExpExec(regexp, string, index);
-  if (!IS_NULL(matchIndices)) {
-    regExpCaptures = matchIndices;
-    regExpSubject = regExpInput = string;
-  }
-  return matchIndices;
+  return %RegExpExec(regexp, string, index, lastMatchInfo);
 }
 
 
 function DoRegExpExecGlobal(regexp, string) {
-  // Here, matchIndices is an array of arrays of substring indices.
-  var matchIndices = %RegExpExecGlobal(regexp, string);
-  if (matchIndices.length != 0) {
-    regExpCaptures = matchIndices[matchIndices.length - 1];
-    regExpSubject = regExpInput = string;
-  }
-  return matchIndices;
+  // Returns an array of arrays of substring indices.
+  return %RegExpExecGlobal(regexp, string, lastMatchInfo);
 }
 
 
 function RegExpExec(string) {
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.exec', this]);
+  }
   if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
     if (IS_UNDEFINED(regExpInput)) {
       throw MakeError('no_input_to_regexp', [this]);
     }
@@ -177,23 +169,21 @@
   }
 
   %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
-  // matchIndices is an array of integers with length of captures*2,
-  // each pair of integers specified the start and the end of index
-  // in the string.
-  var matchIndices = DoRegExpExec(this, s, i);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
 
   if (matchIndices == null) {
     if (this.global) this.lastIndex = 0;
     return matchIndices; // no match
   }
 
-  var numResults = matchIndices.length >> 1;
+  var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
   var result = new $Array(numResults);
   for (var i = 0; i < numResults; i++) {
-    var matchStart = matchIndices[2*i];
-    var matchEnd = matchIndices[2*i + 1];
+    var matchStart = lastMatchInfo[CAPTURE(i << 1)];
+    var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
     if (matchStart != -1 && matchEnd != -1) {
-      result[i] = s.slice(matchStart, matchEnd);
+      result[i] = SubString(s, matchStart, matchEnd);
     } else {
       // Make sure the element is present. Avoid reading the undefined
       // property from the global object since this may change.
@@ -202,16 +192,50 @@
   }
 
   if (this.global)
-    this.lastIndex = matchIndices[1];
-  result.index = matchIndices[0];
+    this.lastIndex = lastMatchInfo[CAPTURE1];
+  result.index = lastMatchInfo[CAPTURE0];
   result.input = s;
   return result;
 }
 
 
+// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
+// that test is defined in terms of String.prototype.exec even if the method is
+// called on a non-RegExp object. However, it probably means the original
+// value of String.prototype.exec, which is what everybody else implements.
 function RegExpTest(string) {
-  var result = (%_ArgumentsLength() == 0) ? this.exec() : this.exec(string);
-  return result != null;
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError('method_called_on_incompatible',
+                        ['RegExp.prototype.test', this]);
+  }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+  var s = ToString(string);
+  var length = s.length;
+  var lastIndex = this.lastIndex;
+  var i = this.global ? TO_INTEGER(lastIndex) : 0;
+
+  if (i < 0 || i > s.length) {
+    this.lastIndex = 0;
+    return false;
+  }
+
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  // matchIndices is either null or the lastMatchInfo array.
+  var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+
+  if (matchIndices == null) {
+    if (this.global) this.lastIndex = 0;
+    return false;
+  }
+
+  if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
+  return true;
 }
 
 
@@ -236,56 +260,72 @@
 // on the captures array of the last successful match and the subject string
 // of the last successful match.
 function RegExpGetLastMatch() {
-  return regExpSubject.slice(regExpCaptures[0], regExpCaptures[1]);
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(regExpSubject,
+                   lastMatchInfo[CAPTURE0],
+                   lastMatchInfo[CAPTURE1]);
 }
 
 
 function RegExpGetLastParen() {
-  var length = regExpCaptures.length;
-  if (length <= 2) return ''; // There were no captures.
+  var length = NUMBER_OF_CAPTURES(lastMatchInfo);
+  if (length <= 2) return '';  // There were no captures.
   // We match the SpiderMonkey behavior: return the substring defined by the
   // last pair (after the first pair) of elements of the capture array even if
   // it is empty.
-  return regExpSubject.slice(regExpCaptures[length - 2],
-                             regExpCaptures[length - 1]);
+  var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+  var start = lastMatchInfo[CAPTURE(length - 2)];
+  var end = lastMatchInfo[CAPTURE(length - 1)];
+  if (start != -1 && end != -1) {
+    return SubString(regExpSubject, start, end);
+  }
+  return "";
 }
 
 
 function RegExpGetLeftContext() {
-  return regExpSubject.slice(0, regExpCaptures[0]);
+  return SubString(LAST_SUBJECT(lastMatchInfo),
+                   0,
+                   lastMatchInfo[CAPTURE0]);
 }
 
 
 function RegExpGetRightContext() {
-  return regExpSubject.slice(regExpCaptures[1], regExpSubject.length);
+  var subject = LAST_SUBJECT(lastMatchInfo);
+  return SubString(subject,
+                   lastMatchInfo[CAPTURE1],
+                   subject.length);
 }
 
 
 // The properties $1..$9 are the first nine capturing substrings of the last
 // successful match, or ''.  The function RegExpMakeCaptureGetter will be
-// called with an index greater than or equal to 1 but it actually works for
-// any non-negative index.
+// called with indices from 1 to 9.
 function RegExpMakeCaptureGetter(n) {
   return function() {
     var index = n * 2;
-    if (index >= regExpCaptures.length) return '';
-    var matchStart = regExpCaptures[index];
-    var matchEnd = regExpCaptures[index + 1];
+    if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
+    var matchStart = lastMatchInfo[CAPTURE(index)];
+    var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
     if (matchStart == -1 || matchEnd == -1) return '';
-    return regExpSubject.slice(matchStart, matchEnd);
+    return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
   };
 }
 
 
-// Properties of the builtins object for recording the result of the last
-// regexp match.  The property regExpCaptures is the matchIndices array of the
-// last successful regexp match (an array of start/end index pairs for the
-// match and all the captured substrings), the invariant is that there is at
-// least two elements.  The property regExpSubject is the subject string for
-// the last successful match.
-var regExpCaptures = [0, 0];
-var regExpSubject = '';
-var regExpInput;
+// Property of the builtins object for recording the result of the last
+// regexp match.  The property lastMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings), the invariant is
+// that there are at least two capture indices.  The array also contains
+// the subject string for the last successful match.
+var lastMatchInfo = [
+    2,                 // REGEXP_NUMBER_OF_CAPTURES
+    0,                 // REGEXP_FIRST_CAPTURE + 0
+    0,                 // REGEXP_FIRST_CAPTURE + 1
+    "",                // Last subject.
+    void 0,            // Last input - settable with RegExpSetInput.
+];
 
 // -------------------------------------------------------------------
 
@@ -302,10 +342,6 @@
     "compile", CompileRegExp
   ));
 
-  // The spec says nothing about the length of exec and test, but
-  // SpiderMonkey and KJS have length equal to 0.
-  %FunctionSetLength($RegExp.prototype.exec, 0);
-  %FunctionSetLength($RegExp.prototype.test, 0);
   // The length of compile is 1 in SpiderMonkey.
   %FunctionSetLength($RegExp.prototype.compile, 1);
 
@@ -313,9 +349,13 @@
   // value is set the value it is set to is coerced to a string. 
   // Getter and setter for the input.
   function RegExpGetInput() {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
     return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
   }
-  function RegExpSetInput(string) { regExpInput = ToString(string); }
+  function RegExpSetInput(string) {
+    lastMatchInfo[lastMatchInfo[REGEXP_NUMBER_OF_CAPTURES] + 2] =
+        ToString(string);
+  };
 
   %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
   %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
diff --git a/src/register-allocator-arm.cc b/src/register-allocator-arm.cc
new file mode 100644
index 0000000..b48710a
--- /dev/null
+++ b/src/register-allocator-arm.cc
@@ -0,0 +1,96 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void Result::ToRegister(Register target) {
+  UNIMPLEMENTED();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+RegisterFile RegisterAllocator::Reserved() {
+  RegisterFile reserved;
+  reserved.Use(sp);
+  reserved.Use(fp);
+  reserved.Use(cp);
+  reserved.Use(pc);
+  return reserved;
+}
+
+
+void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
+  register_file->ref_counts_[sp.code()] = 0;
+  register_file->ref_counts_[fp.code()] = 0;
+  register_file->ref_counts_[cp.code()] = 0;
+  register_file->ref_counts_[pc.code()] = 0;
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The following registers are live on function entry, saved in the
+  // frame, and available for allocation during execution.
+  Use(r1);  // JS function.
+  Use(lr);  // Return address.
+}
+
+
+void RegisterAllocator::Reset() {
+  registers_.Reset();
+  // The following registers are live on function entry and reserved
+  // during execution.
+  Use(sp);  // Stack pointer.
+  Use(fp);  // Frame pointer (caller's frame pointer on entry).
+  Use(cp);  // Context pointer (callee's context on entry).
+  Use(pc);  // Program counter.
+}
+
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  UNIMPLEMENTED();
+  Result invalid(cgen_);
+  return invalid;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/register-allocator-ia32.cc b/src/register-allocator-ia32.cc
new file mode 100644
index 0000000..6149187
--- /dev/null
+++ b/src/register-allocator-ia32.cc
@@ -0,0 +1,130 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  ASSERT(is_valid());
+  if (is_constant()) {
+    Result fresh = cgen_->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    if (cgen_->IsUnsafeSmi(handle())) {
+      cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+    } else {
+      cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+    }
+    // This result becomes a copy of the fresh one.
+    *this = fresh;
+  }
+  ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+  ASSERT(is_valid());
+  if (!is_register() || !reg().is(target)) {
+    Result fresh = cgen_->allocator()->Allocate(target);
+    ASSERT(fresh.is_valid());
+    if (is_register()) {
+      cgen_->masm()->mov(fresh.reg(), reg());
+    } else {
+      ASSERT(is_constant());
+      if (cgen_->IsUnsafeSmi(handle())) {
+        cgen_->LoadUnsafeSmi(fresh.reg(), handle());
+      } else {
+        cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
+      }
+    }
+    *this = fresh;
+  } else if (is_register() && reg().is(target)) {
+    ASSERT(cgen_->has_valid_frame());
+    cgen_->frame()->Spill(target);
+    ASSERT(cgen_->allocator()->count(target) == 1);
+  }
+  ASSERT(is_register());
+  ASSERT(reg().is(target));
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+RegisterFile RegisterAllocator::Reserved() {
+  RegisterFile reserved;
+  reserved.Use(esp);
+  reserved.Use(ebp);
+  reserved.Use(esi);
+  return reserved;
+}
+
+
+void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
+  register_file->ref_counts_[esp.code()] = 0;
+  register_file->ref_counts_[ebp.code()] = 0;
+  register_file->ref_counts_[esi.code()] = 0;
+}
+
+
+void RegisterAllocator::Initialize() {
+  Reset();
+  // The following register is live on function entry, saved in the
+  // frame, and available for allocation during execution.
+  Use(edi);  // JS function.
+}
+
+
+void RegisterAllocator::Reset() {
+  registers_.Reset();
+  // The following registers are live on function entry and reserved
+  // during execution.
+  Use(esp);  // Stack pointer.
+  Use(ebp);  // Frame pointer (caller's frame pointer on entry).
+  Use(esi);  // Context (callee's context on entry).
+}
+
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+  Result result = AllocateWithoutSpilling();
+  // Check that the register is a byte register.  If not, unuse the
+  // register if valid and return an invalid result.
+  if (result.is_valid() && !result.reg().is_byte_register()) {
+    result.Unuse();
+    return Result(cgen_);
+  }
+  return result;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
new file mode 100644
index 0000000..7a96882
--- /dev/null
+++ b/src/register-allocator.cc
@@ -0,0 +1,129 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+Result::Result(Register reg, CodeGenerator* cgen)
+  : type_(REGISTER),
+    cgen_(cgen) {
+  data_.reg_ = reg;
+  ASSERT(reg.is_valid());
+  cgen_->allocator()->Use(reg);
+}
+
+
+void Result::CopyTo(Result* destination) const {
+  destination->type_ = type();
+  destination->cgen_ = cgen_;
+
+  if (is_register()) {
+    destination->data_.reg_ = reg();
+    cgen_->allocator()->Use(reg());
+  } else if (is_constant()) {
+    destination->data_.handle_ = data_.handle_;
+  } else {
+    ASSERT(!is_valid());
+  }
+}
+
+
+void Result::Unuse() {
+  if (is_register()) {
+    cgen_->allocator()->Unuse(reg());
+  }
+  type_ = INVALID;
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterFile implementation.
+
+void RegisterFile::CopyTo(RegisterFile* other) {
+  for (int i = 0; i < kNumRegisters; i++) {
+    other->ref_counts_[i] = ref_counts_[i];
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+
+Result RegisterAllocator::AllocateWithoutSpilling() {
+  // Return the first free register, if any.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if (!is_used(i)) {
+      Register free_reg = { i };
+      return Result(free_reg, cgen_);
+    }
+  }
+  return Result(cgen_);
+}
+
+
+Result RegisterAllocator::Allocate() {
+  Result result = AllocateWithoutSpilling();
+  if (!result.is_valid()) {
+    // Ask the current frame to spill a register.
+    ASSERT(cgen_->has_valid_frame());
+    Register free_reg = cgen_->frame()->SpillAnyRegister();
+    if (free_reg.is_valid()) {
+      ASSERT(!is_used(free_reg));
+      return Result(free_reg, cgen_);
+    }
+  }
+  return result;
+}
+
+
+Result RegisterAllocator::Allocate(Register target) {
+  // If the target is not referenced, it can simply be allocated.
+  if (!is_used(target)) {
+    return Result(target, cgen_);
+  }
+  // If the target is only referenced in the frame, it can be spilled and
+  // then allocated.
+  ASSERT(cgen_->has_valid_frame());
+  if (count(target) == cgen_->frame()->register_count(target)) {
+    cgen_->frame()->Spill(target);
+    ASSERT(!is_used(target));
+    return Result(target, cgen_);
+  }
+  // Otherwise (if it's referenced outside the frame) we cannot allocate it.
+  return Result(cgen_);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/register-allocator.h b/src/register-allocator.h
new file mode 100644
index 0000000..759ab14
--- /dev/null
+++ b/src/register-allocator.h
@@ -0,0 +1,237 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Results
+//
+// Results encapsulate the compile-time values manipulated by the code
+// generator.  They can represent registers or constants.
+
+class Result BASE_EMBEDDED {
+ public:
+  enum Type {
+    INVALID,
+    REGISTER,
+    CONSTANT
+  };
+
+  // Construct an invalid result.
+  explicit Result(CodeGenerator* cgen) : type_(INVALID), cgen_(cgen) {}
+
+  // Construct a register Result.
+  Result(Register reg, CodeGenerator* cgen);
+
+  // Construct a Result whose value is a compile-time constant.
+  Result(Handle<Object> value, CodeGenerator* cgen)
+      : type_(CONSTANT),
+        cgen_(cgen) {
+    data_.handle_ = value.location();
+  }
+
+  // The copy constructor and assignment operators could each create a new
+  // register reference.
+  Result(const Result& other) {
+    other.CopyTo(this);
+  }
+
+  Result& operator=(const Result& other) {
+    if (this != &other) {
+      Unuse();
+      other.CopyTo(this);
+    }
+    return *this;
+  }
+
+  ~Result() { Unuse(); }
+
+  void Unuse();
+
+  Type type() const { return type_; }
+
+  bool is_valid() const { return type() != INVALID; }
+  bool is_register() const { return type() == REGISTER; }
+  bool is_constant() const { return type() == CONSTANT; }
+
+  Register reg() const {
+    ASSERT(type() == REGISTER);
+    return data_.reg_;
+  }
+
+  Handle<Object> handle() const {
+    ASSERT(type() == CONSTANT);
+    return Handle<Object>(data_.handle_);
+  }
+
+  // Move this result to an arbitrary register.  The register is not
+  // necessarily spilled from the frame or even singly-referenced outside
+  // it.
+  void ToRegister();
+
+  // Move this result to a specified register.  The register is spilled from
+  // the frame, and the register is singly-referenced (by this result)
+  // outside the frame.
+  void ToRegister(Register reg);
+
+ private:
+  Type type_;
+
+  union {
+    Register reg_;
+    Object** handle_;
+  } data_;
+
+  CodeGenerator* cgen_;
+
+  void CopyTo(Result* destination) const;
+};
+
+
+// -------------------------------------------------------------------------
+// Register file
+//
+// The register file tracks reference counts for the processor registers.
+// It is used by both the register allocator and the virtual frame.
+
+class RegisterFile BASE_EMBEDDED {
+ public:
+  RegisterFile() { Reset(); }
+
+  void Reset() {
+    for (int i = 0; i < kNumRegisters; i++) {
+      ref_counts_[i] = 0;
+    }
+  }
+
+  // Predicates and accessors for the reference counts.  The versions
+  // that take a register code rather than a register are for
+  // convenience in loops over the register codes.
+  bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
+  bool is_used(Register reg) const { return is_used(reg.code()); }
+  int count(int reg_code) const { return ref_counts_[reg_code]; }
+  int count(Register reg) const { return count(reg.code()); }
+
+  // Record a use of a register by incrementing its reference count.
+  void Use(Register reg) {
+    ref_counts_[reg.code()]++;
+  }
+
+  // Record that a register will no longer be used by decrementing its
+  // reference count.
+  void Unuse(Register reg) {
+    ASSERT(is_used(reg.code()));
+    if (is_used(reg.code())) {
+      ref_counts_[reg.code()]--;
+    }
+  }
+
+  // Copy the reference counts from this register file to the other.
+  void CopyTo(RegisterFile* other);
+
+ private:
+  int ref_counts_[kNumRegisters];
+
+  friend class RegisterAllocator;
+};
+
+
+// -------------------------------------------------------------------------
+// Register allocator
+//
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+  explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
+
+  // A register file with each of the reserved registers counted once.
+  static RegisterFile Reserved();
+
+  // Unuse all the reserved registers in a register file.
+  static void UnuseReserved(RegisterFile* register_file);
+
+  // Predicates and accessors for the registers' reference counts.
+  bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
+  bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
+  int count(int reg_code) const { return registers_.count(reg_code); }
+  int count(Register reg) const { return registers_.count(reg.code()); }
+
+  // Explicitly record a reference to a register.
+  void Use(Register reg) { registers_.Use(reg); }
+
+  // Explicitly record that a register will no longer be used.
+  void Unuse(Register reg) { registers_.Unuse(reg); }
+
+  // Initialize the register allocator for entry to a JS function.  On
+  // entry, the registers used by the JS calling convention are
+  // externally referenced (ie, outside the virtual frame); and the
+  // other registers are free.
+  void Initialize();
+
+  // Reset the register reference counts to free all non-reserved registers.
+  // A frame-external reference is kept to each of the reserved registers.
+  void Reset();
+
+  // Allocate a free register and return a register result if possible or
+  // fail and return an invalid result.
+  Result Allocate();
+
+  // Allocate a specific register if possible, spilling it from the frame if
+  // necessary, or else fail and return an invalid result.
+  Result Allocate(Register target);
+
+  // Allocate a free register without spilling any from the current frame or
+  // fail and return an invalid result.
+  Result AllocateWithoutSpilling();
+
+  // Allocate a free byte register without spilling any from the
+  // current frame or fail and return an invalid result.
+  Result AllocateByteRegisterWithoutSpilling();
+
+  // Copy the internal state to a register file, to be restored later by
+  // RestoreFrom.
+  void SaveTo(RegisterFile* register_file) {
+    registers_.CopyTo(register_file);
+  }
+
+  void RestoreFrom(RegisterFile* register_file) {
+    register_file->CopyTo(&registers_);
+  }
+
+ private:
+  CodeGenerator* cgen_;
+  RegisterFile registers_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_REGISTER_ALLOCATOR_H_
diff --git a/src/runtime.cc b/src/runtime.cc
index b04af51..bd81704 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -43,6 +43,7 @@
 #include "scopeinfo.h"
 #include "v8threads.h"
 #include "smart-pointer.h"
+#include "parser.h"
 
 namespace v8 { namespace internal {
 
@@ -69,6 +70,13 @@
   RUNTIME_ASSERT(obj->IsBoolean());                                   \
   bool name = (obj)->IsTrue();
 
+// Cast the given object to an int and store it in a variable with
+// the given name.  If the object is not a Smi, fail the surrounding
+// runtime function via RUNTIME_ASSERT.
+#define CONVERT_INT_CHECKED(name, obj)                            \
+  RUNTIME_ASSERT(obj->IsSmi());                                   \
+  int name = Smi::cast(obj)->value();
+
 // Cast the given object to a double and store it in a variable with
 // the given name.  If the object is not a number (as opposed to
 // the number not-a-number) call IllegalOperation and return.
@@ -84,7 +92,7 @@
   type name = NumberTo##Type(obj);
 
 // Non-reentrant string buffer for efficient general use in this file.
-static StaticResource<StringInputBuffer> string_input_buffer;
+static StaticResource<StringInputBuffer> runtime_string_input_buffer;
 
 
 static Object* IllegalOperation() {
@@ -92,7 +100,7 @@
 }
 
 
-static Object* Runtime_CloneObjectLiteralBoilerplate(Arguments args) {
+static Object* Runtime_CloneLiteralBoilerplate(Arguments args) {
   CONVERT_CHECKED(JSObject, boilerplate, args[0]);
   return Heap::CopyJSObject(boilerplate);
 }
@@ -131,14 +139,14 @@
 }
 
 
-static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
-  HandleScope scope;
-  ASSERT(args.length() == 3);
-  // Copy the arguments.
-  Handle<FixedArray> literals = args.at<FixedArray>(0);
-  int literals_index = Smi::cast(args[1])->value();
-  Handle<FixedArray> constant_properties = args.at<FixedArray>(2);
+static Handle<Object> CreateLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> constant_properties);
 
+
+static Handle<Object> CreateObjectLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> constant_properties) {
   // Get the global context from the literals array.  This is the
   // context in which the function was created and we use the object
   // function from this context to create the object literal.  We do
@@ -161,6 +169,13 @@
     for (int index = 0; index < length; index +=2) {
       Handle<Object> key(constant_properties->get(index+0));
       Handle<Object> value(constant_properties->get(index+1));
+      if (value->IsFixedArray()) {
+        // The value contains the constant_properties of a
+        // simple object literal.
+        Handle<FixedArray> array = Handle<FixedArray>::cast(value);
+        value = CreateLiteralBoilerplate(literals, array);
+        if (value.is_null()) return value;
+      }
       Handle<Object> result;
       uint32_t element_index = 0;
       if (key->IsSymbol()) {
@@ -185,39 +200,96 @@
       // exception, the exception is converted to an empty handle in
       // the handle based operations.  In that case, we need to
       // convert back to an exception.
-      if (result.is_null()) return Failure::Exception();
+      if (result.is_null()) return result;
     }
   }
 
-  // Update the functions literal and return the boilerplate.
-  literals->set(literals_index, *boilerplate);
-
-  return *boilerplate;
+  return boilerplate;
 }
 
 
-static Object* Runtime_CreateArrayLiteral(Arguments args) {
+static Handle<Object> CreateArrayLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> elements) {
+  // Create the JSArray.
+  Handle<JSFunction> constructor(
+      JSFunction::GlobalContextFromLiterals(*literals)->array_function());
+  Handle<Object> object = Factory::NewJSObject(constructor);
+
+  Handle<Object> copied_elements = Factory::CopyFixedArray(elements);
+
+  Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
+  for (int i = 0; i < content->length(); i++) {
+    if (content->get(i)->IsFixedArray()) {
+      // The value contains the constant_properties of a
+      // simple object literal.
+      Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
+      Handle<Object> result = CreateLiteralBoilerplate(literals, fa);
+      if (result.is_null()) return result;
+      content->set(i, *result);
+    }
+  }
+
+  // Set the elements.
+  Handle<JSArray>::cast(object)->SetContent(*content);
+  return object;
+}
+
+
+static Handle<Object> CreateLiteralBoilerplate(
+    Handle<FixedArray> literals,
+    Handle<FixedArray> array) {
+  Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
+  switch (CompileTimeValue::GetType(array)) {
+    case CompileTimeValue::OBJECT_LITERAL:
+      return CreateObjectLiteralBoilerplate(literals, elements);
+    case CompileTimeValue::ARRAY_LITERAL:
+      return CreateArrayLiteralBoilerplate(literals, elements);
+    default:
+      UNREACHABLE();
+      return Handle<Object>::null();
+  }
+}
+
+
+static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  // Copy the arguments.
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_INT_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  Handle<Object> result =
+    CreateObjectLiteralBoilerplate(literals, constant_properties);
+
+  if (result.is_null()) return Failure::Exception();
+
+  // Update the function's literals array and return the boilerplate.
+  literals->set(literals_index, *result);
+
+  return *result;
+}
+
+
+static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
   // Takes a FixedArray of elements containing the literal elements of
   // the array literal and produces JSArray with those elements.
   // Additionally takes the literals array of the surrounding function
   // which contains the context from which to get the Array function
   // to use for creating the array literal.
-  ASSERT(args.length() == 2);
-  CONVERT_CHECKED(FixedArray, elements, args[0]);
-  CONVERT_CHECKED(FixedArray, literals, args[1]);
-  JSFunction* constructor =
-      JSFunction::GlobalContextFromLiterals(literals)->array_function();
-  // Create the JSArray.
-  Object* object = Heap::AllocateJSObject(constructor);
-  if (object->IsFailure()) return object;
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_INT_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
-  // Copy the elements.
-  Object* content = elements->Copy();
-  if (content->IsFailure()) return content;
+  Handle<Object> object = CreateArrayLiteralBoilerplate(literals, elements);
+  if (object.is_null()) return Failure::Exception();
 
-  // Set the elements.
-  JSArray::cast(object)->SetContent(FixedArray::cast(content));
-  return object;
+  // Update the function's literals array and return the boilerplate.
+  literals->set(literals_index, *object);
+  return *object;
 }
 
 
@@ -858,14 +930,21 @@
 
 static Object* Runtime_RegExpExec(Arguments args) {
   HandleScope scope;
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   CONVERT_CHECKED(JSRegExp, raw_regexp, args[0]);
   Handle<JSRegExp> regexp(raw_regexp);
   CONVERT_CHECKED(String, raw_subject, args[1]);
   Handle<String> subject(raw_subject);
-  Handle<Object> index(args[2]);
-  ASSERT(index->IsNumber());
-  Handle<Object> result = RegExpImpl::Exec(regexp, subject, index);
+  // Due to the way the JS files are constructed this must be less than the
+  // length of a string, i.e. it is always a Smi.  We check anyway for security.
+  CONVERT_CHECKED(Smi, index, args[2]);
+  CONVERT_CHECKED(JSArray, raw_last_match_info, args[3]);
+  Handle<JSArray> last_match_info(raw_last_match_info);
+  CHECK(last_match_info->HasFastElements());
+  Handle<Object> result = RegExpImpl::Exec(regexp,
+                                           subject,
+                                           index->value(),
+                                           last_match_info);
   if (result.is_null()) return Failure::Exception();
   return *result;
 }
@@ -873,12 +952,16 @@
 
 static Object* Runtime_RegExpExecGlobal(Arguments args) {
   HandleScope scope;
-  ASSERT(args.length() == 2);
+  ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSRegExp, raw_regexp, args[0]);
   Handle<JSRegExp> regexp(raw_regexp);
   CONVERT_CHECKED(String, raw_subject, args[1]);
   Handle<String> subject(raw_subject);
-  Handle<Object> result = RegExpImpl::ExecGlobal(regexp, subject);
+  CONVERT_CHECKED(JSArray, raw_last_match_info, args[2]);
+  Handle<JSArray> last_match_info(raw_last_match_info);
+  CHECK(last_match_info->HasFastElements());
+  Handle<Object> result =
+      RegExpImpl::ExecGlobal(regexp, subject, last_match_info);
   if (result.is_null()) return Failure::Exception();
   return *result;
 }
@@ -2310,7 +2393,7 @@
   int escaped_length = 0;
   int length = source->length();
   {
-    Access<StringInputBuffer> buffer(&string_input_buffer);
+    Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
     buffer->Reset(source);
     while (buffer->has_more()) {
       uint16_t character = buffer->GetNext();
@@ -2338,7 +2421,7 @@
   StringShape dshape(destination);
   int dest_position = 0;
 
-  Access<StringInputBuffer> buffer(&string_input_buffer);
+  Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
   buffer->Rewind();
   while (buffer->has_more()) {
     uint16_t chr = buffer->GetNext();
@@ -2574,7 +2657,7 @@
 
   // Convert all characters to upper case, assuming that they will fit
   // in the buffer
-  Access<StringInputBuffer> buffer(&string_input_buffer);
+  Access<StringInputBuffer> buffer(&runtime_string_input_buffer);
   buffer->Reset(s);
   unibrow::uchar chars[Converter::kMaxWidth];
   int i = 0;
@@ -4949,7 +5032,7 @@
   ASSERT(args.length() >= 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   // Check that the break id is valid.
-  if (Top::break_id() == 0 || break_id != Top::break_id()) {
+  if (Debug::break_id() == 0 || break_id != Debug::break_id()) {
     return Top::Throw(Heap::illegal_execution_state_symbol());
   }
 
@@ -4967,7 +5050,7 @@
 
   // Count all frames which are relevant to debugging stack trace.
   int n = 0;
-  StackFrame::Id id = Top::break_frame_id();
+  StackFrame::Id id = Debug::break_frame_id();
   if (id == StackFrame::NO_ID) {
     // If there is no JavaScript stack frame count is 0.
     return Smi::FromInt(0);
@@ -5012,7 +5095,7 @@
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
 
   // Find the relevant frame with the requested index.
-  StackFrame::Id id = Top::break_frame_id();
+  StackFrame::Id id = Debug::break_frame_id();
   if (id == StackFrame::NO_ID) {
     // If there are no JavaScript stack frames return undefined.
     return Heap::undefined_value();
@@ -5700,17 +5783,13 @@
   NoHandleAllocation ha;
   AssertNoAllocation no_alloc;
 
-  // Get hold of the current empty script.
-  Context* context = Top::context()->global_context();
-  Script* empty = context->empty_script();
-
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
   while (iterator.has_next()) {
     HeapObject* obj = iterator.next();
     ASSERT(obj != NULL);
-    if (obj->IsScript() && obj != empty) {
+    if (obj->IsScript()) {
       if (instances != NULL && count < instances_size) {
         instances->set(count, obj);
       }
diff --git a/src/runtime.h b/src/runtime.h
index 34aa4b9..079a60f 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -137,8 +137,8 @@
   \
   /* Regular expressions */ \
   F(RegExpCompile, 3) \
-  F(RegExpExec, 3) \
-  F(RegExpExecGlobal, 2) \
+  F(RegExpExec, 4) \
+  F(RegExpExecGlobal, 3) \
   \
   /* Strings */ \
   F(StringCharCodeAt, 2) \
@@ -250,9 +250,9 @@
   \
   /* Literals */ \
   F(MaterializeRegExpLiteral, 4)\
-  F(CreateArrayLiteral, 2) \
+  F(CreateArrayLiteralBoilerplate, 3) \
   F(CreateObjectLiteralBoilerplate, 3) \
-  F(CloneObjectLiteralBoilerplate, 1) \
+  F(CloneLiteralBoilerplate, 1) \
   \
   /* Catch context extension objects */ \
   F(CreateCatchExtensionObject, 2) \
@@ -325,7 +325,6 @@
     kNofFunctions
 #undef F
   };
-  static Object* CreateArrayLiteral(Arguments args);
 
   // Runtime function descriptor.
   struct Function {
diff --git a/src/scopes.cc b/src/scopes.cc
index 0a381df..e959f02 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -822,7 +822,8 @@
         var->rewrite_ =
           new Property(arguments_shadow_,
                        new Literal(Handle<Object>(Smi::FromInt(i))),
-                       RelocInfo::kNoPosition);
+                       RelocInfo::kNoPosition,
+                       Property::SYNTHETIC);
         arguments_shadow->var_uses()->RecordUses(var->var_uses());
       }
     }
diff --git a/src/simulator-arm.cc b/src/simulator-arm.cc
index 7047e9c..5a61107 100644
--- a/src/simulator-arm.cc
+++ b/src/simulator-arm.cc
@@ -1525,7 +1525,7 @@
 
 
 Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
-                           int32_t p3, int32_t p4) {
+                        int32_t p3, int32_t p4) {
   // Setup parameters
   set_register(r0, p0);
   set_register(r1, p1);
diff --git a/src/string.js b/src/string.js
index 18d47ca..f8efab6 100644
--- a/src/string.js
+++ b/src/string.js
@@ -62,7 +62,7 @@
 
 // ECMA-262, section 15.5.4.4
 function StringCharAt(pos) {
-  var char_code = %_FastCharCodeAt(subject, index);
+  var char_code = %_FastCharCodeAt(this, index);
   if (!%_IsSmi(char_code)) {
     var subject = ToString(this);
     var index = TO_INTEGER(pos);
@@ -165,8 +165,9 @@
   // Build the result array.
   var result = new $Array(match_string);
   for (var i = 0; i < matches.length; ++i) {
-    var match = matches[i];
-    var match_string = subject.slice(match[0], match[1]);
+    var matchInfo = matches[i];
+    var match_string = subject.slice(matchInfo[CAPTURE0],
+                                     matchInfo[CAPTURE1]);
     result[i] = match_string;
   }
 
@@ -218,7 +219,9 @@
   if (IS_FUNCTION(replace)) {
     builder.add(replace.call(null, search, start, subject));
   } else {
-    ExpandReplacement(ToString(replace), subject, [ start, end ], builder);
+    reusableMatchInfo[CAPTURE0] = start;
+    reusableMatchInfo[CAPTURE1] = end;
+    ExpandReplacement(ToString(replace), subject, reusableMatchInfo, builder);
   }
 
   // suffix
@@ -228,6 +231,15 @@
 }
 
 
+// This has the same size as the lastMatchInfo array, and can be used for
+// functions that expect that structure to be returned.  It is used when the
+// needle is a string rather than a regexp.  In this case we can't update
+// lastMatchArray without erroneously affecting the properties on the global
+// RegExp object.
+var reusableMatchInfo = [2, -1, -1, "", ""];
+var reusableMatchArray = [ void 0 ];
+
+
 // Helper function for regular expressions in String.prototype.replace.
 function StringReplaceRegExp(subject, regexp, replace) {
   // Compute an array of matches; each match is really a list of
@@ -237,9 +249,10 @@
     matches = DoRegExpExecGlobal(regexp, subject);
     if (matches.length == 0) return subject;
   } else {
-    var captures = DoRegExpExec(regexp, subject, 0);
-    if (IS_NULL(captures)) return subject;
-    matches = [ captures ];
+    var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
+    if (IS_NULL(lastMatchInfo)) return subject;
+    reusableMatchArray[0] = lastMatchInfo;
+    matches = reusableMatchArray;
   }
 
   // Determine the number of matches.
@@ -253,17 +266,17 @@
   replace = ToString(replace);
   if (%StringIndexOf(replace, "$", 0) < 0) {
     for (var i = 0; i < length; i++) {
-      var captures = matches[i];
-      result.addSpecialSlice(previous, captures[0]);
+      var matchInfo = matches[i];
+      result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
       result.add(replace);
-      previous = captures[1];  // continue after match
+      previous = matchInfo[CAPTURE1];  // continue after match
     }
   } else {
     for (var i = 0; i < length; i++) {
-      var captures = matches[i];
-      result.addSpecialSlice(previous, captures[0]);
-      ExpandReplacement(replace, subject, captures, result);
-      previous = captures[1];  // continue after match
+      var matchInfo = matches[i];
+      result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
+      ExpandReplacement(replace, subject, matchInfo, result);
+      previous = matchInfo[CAPTURE1];  // continue after match
     }
   }
   result.addSpecialSlice(previous, subject.length);
@@ -273,7 +286,7 @@
 
 // Expand the $-expressions in the string and return a new string with
 // the result.
-function ExpandReplacement(string, subject, captures, builder) {
+function ExpandReplacement(string, subject, matchInfo, builder) {
   var next = %StringIndexOf(string, '$', 0);
   if (next < 0) {
     builder.add(string);
@@ -281,7 +294,7 @@
   }
 
   // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
-  var m = captures.length >> 1;  // includes the match
+  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;  // Includes the match.
 
   if (next > 0) builder.add(SubString(string, 0, next));
   var length = string.length;
@@ -299,13 +312,14 @@
         builder.add('$');
       } else if (peek == 38) {  // $& - match
         ++position;
-        builder.addSpecialSlice(captures[0], captures[1]);
+        builder.addSpecialSlice(matchInfo[CAPTURE0],
+                                matchInfo[CAPTURE1]);
       } else if (peek == 96) {  // $` - prefix
         ++position;
-        builder.addSpecialSlice(0, captures[0]);
+        builder.addSpecialSlice(0, matchInfo[CAPTURE0]);
       } else if (peek == 39) {  // $' - suffix
         ++position;
-        builder.addSpecialSlice(captures[1], subject.length);
+        builder.addSpecialSlice(matchInfo[CAPTURE1], subject.length);
       } else if (peek >= 48 && peek <= 57) {  // $n, 0 <= n <= 9
         ++position;
         var n = peek - 48;
@@ -329,7 +343,7 @@
           }
         }
         if (0 < n && n < m) {
-          addCaptureString(builder, captures, n);
+          addCaptureString(builder, matchInfo, n);
         } else {
           // Because of the captures range check in the parsing of two
           // digit capture references, we can only enter here when a
@@ -361,26 +375,27 @@
 };
 
 
-// Compute the string of a given PCRE capture.
-function CaptureString(string, captures, index) {
+// Compute the string of a given regular expression capture.
+function CaptureString(string, lastCaptureInfo, index) {
   // Scale the index.
   var scaled = index << 1;
   // Compute start and end.
-  var start = captures[scaled];
-  var end = captures[scaled + 1];
+  var start = lastCaptureInfo[CAPTURE(scaled)];
+  var end = lastCaptureInfo[CAPTURE(scaled + 1)];
   // If either start or end is missing return undefined.
   if (start < 0 || end < 0) return;
   return SubString(string, start, end);
 };
 
 
-// Add the string of a given PCRE capture to the ReplaceResultBuilder
-function addCaptureString(builder, captures, index) {
+// Add the string of a given regular expression capture to the
+// ReplaceResultBuilder
+function addCaptureString(builder, matchInfo, index) {
   // Scale the index.
   var scaled = index << 1;
   // Compute start and end.
-  var start = captures[scaled];
-  var end = captures[scaled + 1];
+  var start = matchInfo[CAPTURE(scaled)];
+  var end = matchInfo[CAPTURE(scaled + 1)];
   // If either start or end is missing return.
   if (start < 0 || end <= start) return;
   builder.addSpecialSlice(start, end);
@@ -396,10 +411,8 @@
 // should be 'abcd' and not 'dddd' (or anything else).
 function StringReplaceRegExpWithFunction(subject, regexp, replace) {
   var result = new ReplaceResultBuilder(subject);
-  // Captures is an array of pairs of (start, end) indices for the match and
-  // any captured substrings.
-  var captures = DoRegExpExec(regexp, subject, 0);
-  if (IS_NULL(captures)) return subject;
+  var lastMatchInfo = DoRegExpExec(regexp, subject, 0);
+  if (IS_NULL(lastMatchInfo)) return subject;
 
   // There's at least one match.  If the regexp is global, we have to loop
   // over all matches.  The loop is not in C++ code here like the one in
@@ -409,13 +422,16 @@
   if (regexp.global) {
     var previous = 0;
     do {
-      result.addSpecialSlice(previous, captures[0]);
-      result.add(ApplyReplacementFunction(replace, captures, subject));
+      result.addSpecialSlice(previous, lastMatchInfo[CAPTURE0]);
+      var startOfMatch = lastMatchInfo[CAPTURE0];
+      previous = lastMatchInfo[CAPTURE1];
+      result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
+      // Can't use lastMatchInfo any more from here, since the function could
+      // overwrite it.
       // Continue with the next match.
-      previous = captures[1];
       // Increment previous if we matched an empty string, as per ECMA-262
       // 15.5.4.10.
-      if (previous == captures[0]) {
+      if (previous == startOfMatch) {
         // Add the skipped character to the output, if any.
         if (previous < subject.length) {
           result.addSpecialSlice(previous, previous + 1);
@@ -425,19 +441,22 @@
 
       // Per ECMA-262 15.10.6.2, if the previous index is greater than the
       // string length, there is no match
-      captures = (previous > subject.length)
+      lastMatchInfo = (previous > subject.length)
           ? null
           : DoRegExpExec(regexp, subject, previous);
-    } while (!IS_NULL(captures));
+    } while (!IS_NULL(lastMatchInfo));
 
     // Tack on the final right substring after the last match, if necessary.
     if (previous < subject.length) {
       result.addSpecialSlice(previous, subject.length);
     }
   } else { // Not a global regexp, no need to loop.
-    result.addSpecialSlice(0, captures[0]);
-    result.add(ApplyReplacementFunction(replace, captures, subject));
-    result.addSpecialSlice(captures[1], subject.length);
+    result.addSpecialSlice(0, lastMatchInfo[CAPTURE0]);
+    var endOfMatch = lastMatchInfo[CAPTURE1];
+    result.add(ApplyReplacementFunction(replace, lastMatchInfo, subject));
+    // Can't use lastMatchInfo any more from here, since the function could
+    // overwrite it.
+    result.addSpecialSlice(endOfMatch, subject.length);
   }
 
   return result.generate();
@@ -445,20 +464,20 @@
 
 
 // Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, captures, subject) {
+function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
   // Compute the parameter list consisting of the match, captures, index,
   // and subject for the replace function invocation.
-  var index = captures[0];
+  var index = lastMatchInfo[CAPTURE0];
   // The number of captures plus one for the match.
-  var m = captures.length >> 1;
+  var m = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
   if (m == 1) {
-    var s = CaptureString(subject, captures, 0);
+    var s = CaptureString(subject, lastMatchInfo, 0);
     // Don't call directly to avoid exposing the built-in global object.
     return ToString(replace.call(null, s, index, subject));
   }
   var parameters = $Array(m + 2);
   for (var j = 0; j < m; j++) {
-    parameters[j] = CaptureString(subject, captures, j);
+    parameters[j] = CaptureString(subject, lastMatchInfo, j);
   }
   parameters[j] = index;
   parameters[j + 1] = subject;
@@ -559,14 +578,14 @@
       return result;
     }
 
-    var match = splitMatch(sep, subject, currentIndex, startIndex);
+    var lastMatchInfo = splitMatch(sep, subject, currentIndex, startIndex);
 
-    if (IS_NULL(match)) {
+    if (IS_NULL(lastMatchInfo)) {
       result[result.length] = subject.slice(currentIndex, length);
       return result;
     }
 
-    var endIndex = match[0];
+    var endIndex = lastMatchInfo[CAPTURE1];
 
     // We ignore a zero-length match at the currentIndex.
     if (startIndex === endIndex && endIndex === currentIndex) {
@@ -574,11 +593,20 @@
       continue;
     }
 
-    result[result.length] = match[1];
+    result[result.length] =
+        SubString(subject, currentIndex, lastMatchInfo[CAPTURE0]);
     if (result.length === lim) return result;
 
-    for (var i = 2; i < match.length; i++) {
-      result[result.length] = match[i];
+    for (var i = 2; i < NUMBER_OF_CAPTURES(lastMatchInfo); i += 2) {
+      var start = lastMatchInfo[CAPTURE(i)];
+      var end = lastMatchInfo[CAPTURE(i + 1)];
+      if (start != -1 && end != -1) {
+        result[result.length] = SubString(subject,
+                                          lastMatchInfo[CAPTURE(i)],
+                                          lastMatchInfo[CAPTURE(i + 1)]);
+      } else {
+        result[result.length] = void 0;
+      }
       if (result.length === lim) return result;
     }
 
@@ -588,32 +616,24 @@
 
 
 // ECMA-262 section 15.5.4.14
-// Helper function used by split.
+// Helper function used by split.  This version returns the lastMatchInfo
+// instead of allocating a new array with basically the same information.
 function splitMatch(separator, subject, current_index, start_index) {
   if (IS_REGEXP(separator)) {
-    var ovector = DoRegExpExec(separator, subject, start_index);
-    if (ovector == null) return null;
-    var nof_results = ovector.length >> 1;
-    var result = new $Array(nof_results + 1);
+    var lastMatchInfo = DoRegExpExec(separator, subject, start_index);
+    if (lastMatchInfo == null) return null;
     // Section 15.5.4.14 paragraph two says that we do not allow zero length
     // matches at the end of the string.
-    if (ovector[0] === subject.length) return null;
-    result[0] = ovector[1];
-    result[1] = subject.slice(current_index, ovector[0]);
-    for (var i = 1; i < nof_results; i++) {
-      var matching_start = ovector[2*i];
-      var matching_end = ovector[2*i + 1];
-      if (matching_start != -1 && matching_end != -1) {
-        result[i + 1] = subject.slice(matching_start, matching_end);
-      }
-    }
-    return result;
+    if (lastMatchInfo[CAPTURE0] === subject.length) return null;
+    return lastMatchInfo;
   }
 
   var separatorIndex = subject.indexOf(separator, start_index);
   if (separatorIndex === -1) return null;
 
-  return [ separatorIndex + separator.length, subject.slice(current_index, separatorIndex) ];
+  reusableMatchInfo[CAPTURE0] = separatorIndex;
+  reusableMatchInfo[CAPTURE1] = separatorIndex + separator.length;
+  return reusableMatchInfo;
 };
 
 
diff --git a/src/stub-cache-arm.cc b/src/stub-cache-arm.cc
index 45fa1c9..211b643 100644
--- a/src/stub-cache-arm.cc
+++ b/src/stub-cache-arm.cc
@@ -488,13 +488,14 @@
   // Do a tail-call of the compiled function.
   __ Jump(r2);
 
-  return GetCodeWithFlags(flags);
+  return GetCodeWithFlags(flags, "LazyCompileStub");
 }
 
 
 Object* CallStubCompiler::CompileCallField(Object* object,
                                            JSObject* holder,
-                                           int index) {
+                                           int index,
+                                           String* name) {
   // ----------- S t a t e -------------
   //  -- lr: return address
   // -----------------------------------
@@ -538,7 +539,7 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
@@ -659,7 +660,11 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
 
@@ -679,7 +684,7 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -712,7 +717,7 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
@@ -767,7 +772,7 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -819,13 +824,14 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
-                                           int index) {
+                                           int index,
+                                           String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -840,13 +846,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback) {
+                                              AccessorInfo* callback,
+                                              String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -860,13 +867,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
                                               JSObject* holder,
-                                              Object* value) {
+                                              Object* value,
+                                              String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -881,7 +889,7 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
@@ -902,7 +910,7 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -929,7 +937,7 @@
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
@@ -955,7 +963,7 @@
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -982,7 +990,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
@@ -1007,7 +1015,7 @@
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -1030,7 +1038,7 @@
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1055,7 +1063,7 @@
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1068,7 +1076,7 @@
   // -----------------------------------
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1108,7 +1116,7 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
diff --git a/src/stub-cache-ia32.cc b/src/stub-cache-ia32.cc
index ad1eb4c..19777e6 100644
--- a/src/stub-cache-ia32.cc
+++ b/src/stub-cache-ia32.cc
@@ -148,9 +148,7 @@
   __ j(zero, miss_label, not_taken);
 
   // Check that the object is a JS array.
-  __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, JS_ARRAY_TYPE);
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
   __ j(not_equal, miss_label, not_taken);
 
   // Load length directly from the JS array.
@@ -465,13 +463,14 @@
   __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
   __ jmp(Operand(ecx));
 
-  return GetCodeWithFlags(flags);
+  return GetCodeWithFlags(flags, "LazyCompileStub");
 }
 
 
 Object* CallStubCompiler::CompileCallField(Object* object,
                                            JSObject* holder,
-                                           int index) {
+                                           int index,
+                                           String* name) {
   // ----------- S t a t e -------------
   // -----------------------------------
   Label miss;
@@ -493,9 +492,7 @@
   // Check that the function really is a function.
   __ test(edi, Immediate(kSmiTagMask));
   __ j(zero, &miss, not_taken);
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));  // get the map
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ebx, JS_FUNCTION_TYPE);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &miss, not_taken);
 
   // Patch the receiver on the stack with the global proxy if
@@ -514,7 +511,7 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
@@ -572,9 +569,7 @@
       // Check that the object is a smi or a heap number.
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &fast, taken);
-      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-      __ cmp(ecx, HEAP_NUMBER_TYPE);
+      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
       __ j(not_equal, &miss, not_taken);
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
@@ -634,7 +629,11 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
 
@@ -686,9 +685,7 @@
   // Check that the function really is a function.
   __ test(edi, Immediate(kSmiTagMask));
   __ j(zero, &miss, not_taken);
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ebx, JS_FUNCTION_TYPE);
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &miss, not_taken);
 
   // Patch the receiver on the stack with the global proxy if
@@ -707,7 +704,7 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -742,7 +739,7 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
@@ -797,7 +794,7 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -850,7 +847,7 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -893,13 +890,14 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
-                                           int index) {
+                                           int index,
+                                           String* name) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- esp[0] : return address
@@ -913,13 +911,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback) {
+                                              AccessorInfo* callback,
+                                              String* name) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- esp[0] : return address
@@ -934,13 +933,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
                                               JSObject* holder,
-                                              Object* value) {
+                                              Object* value,
+                                              String* name) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- esp[0] : return address
@@ -954,7 +954,7 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
@@ -974,7 +974,7 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -1003,7 +1003,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(FIELD);
+  return GetCode(FIELD, name);
 }
 
 
@@ -1033,7 +1033,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1062,7 +1062,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CONSTANT_FUNCTION);
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
@@ -1090,7 +1090,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(INTERCEPTOR);
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -1118,7 +1118,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1144,7 +1144,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -1170,7 +1170,7 @@
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(CALLBACKS);
+  return GetCode(CALLBACKS, name);
 }
 
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 35b5be3..b7ef311 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -100,7 +100,7 @@
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadField(receiver, holder, field_index);
+    code = compiler.CompileLoadField(receiver, holder, field_index, name);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -119,7 +119,7 @@
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadCallback(receiver, holder, callback);
+    code = compiler.CompileLoadCallback(receiver, holder, callback, name);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -138,7 +138,7 @@
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadConstant(receiver, holder, value);
+    code = compiler.CompileLoadConstant(receiver, holder, value, name);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -434,7 +434,7 @@
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(argc);
-    code = compiler.CompileCallField(object, holder, index);
+    code = compiler.CompileCallField(object, holder, index, name);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
@@ -788,7 +788,7 @@
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateInitialize(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallInitialize");
   if (!result->IsFailure()) {
     Counters::call_initialize_stubs.Increment();
     Code* code = Code::cast(result);
@@ -803,7 +803,7 @@
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateInitialize(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
   if (!result->IsFailure()) {
     Counters::call_premonomorphic_stubs.Increment();
     Code* code = Code::cast(result);
@@ -818,7 +818,7 @@
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateNormal(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallNormal");
   if (!result->IsFailure()) {
     Counters::call_normal_stubs.Increment();
     Code* code = Code::cast(result);
@@ -833,7 +833,7 @@
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateMegamorphic(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallMegamorphic");
   if (!result->IsFailure()) {
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
@@ -848,7 +848,7 @@
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateMiss(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallMiss");
   if (!result->IsFailure()) {
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
@@ -862,7 +862,7 @@
 Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
   HandleScope scope;
   Debug::GenerateCallICDebugBreak(masm());
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallDebugBreak");
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
@@ -878,7 +878,7 @@
   // the miss case.
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   CallIC::GenerateMiss(masm(), argc);
-  Object* result = GetCodeWithFlags(flags);
+  Object* result = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
@@ -889,45 +889,55 @@
 }
 
 
-Object* StubCompiler::GetCodeWithFlags(Code::Flags flags) {
+Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
   CodeDesc desc;
   masm_.GetCode(&desc);
   Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code_stubs && !result->IsFailure()) {
-    Code::cast(result)->Print();
+    Code::cast(result)->Disassemble(name);
   }
 #endif
   return result;
 }
 
 
-Object* LoadStubCompiler::GetCode(PropertyType type) {
-  return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::LOAD_IC, type));
+Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
+  if (FLAG_print_code_stubs && (name != NULL)) {
+    return GetCodeWithFlags(flags, *name->ToCString());
+  }
+  return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
 }
 
 
-Object* KeyedLoadStubCompiler::GetCode(PropertyType type) {
-  return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
-                                                        type));
+Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
+  return GetCodeWithFlags(flags, name);
 }
 
 
-Object* StoreStubCompiler::GetCode(PropertyType type) {
-  return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::STORE_IC, type));
+Object* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
+  return GetCodeWithFlags(flags, name);
 }
 
 
-Object* KeyedStoreStubCompiler::GetCode(PropertyType type) {
-  return GetCodeWithFlags(Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC,
-                                                        type));
+Object* StoreStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
+  return GetCodeWithFlags(flags, name);
 }
 
 
-Object* CallStubCompiler::GetCode(PropertyType type) {
+Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  return GetCodeWithFlags(flags, name);
+}
+
+
+Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
   int argc = arguments_.immediate();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, type, argc);
-  return GetCodeWithFlags(flags);
+  return GetCodeWithFlags(flags, name);
 }
 
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index ec93c80..05845e5 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -362,7 +362,8 @@
   static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
 
  protected:
-  Object* GetCodeWithFlags(Code::Flags flags);
+  Object* GetCodeWithFlags(Code::Flags flags, const char* name);
+  Object* GetCodeWithFlags(Code::Flags flags, String* name);
 
   MacroAssembler* masm() { return &masm_; }
 
@@ -374,19 +375,24 @@
 
 class LoadStubCompiler: public StubCompiler {
  public:
-  Object* CompileLoadField(JSObject* object, JSObject* holder, int index);
+  Object* CompileLoadField(JSObject* object,
+                           JSObject* holder,
+                           int index,
+                           String* name);
   Object* CompileLoadCallback(JSObject* object,
                               JSObject* holder,
-                              AccessorInfo* callback);
+                              AccessorInfo* callback,
+                              String* name);
   Object* CompileLoadConstant(JSObject* object,
                               JSObject* holder,
-                              Object* value);
+                              Object* value,
+                              String* name);
   Object* CompileLoadInterceptor(JSObject* object,
                                  JSObject* holder,
                                  String* name);
 
  private:
-  Object* GetCode(PropertyType);
+  Object* GetCode(PropertyType type, String* name);
 };
 
 
@@ -412,7 +418,7 @@
   Object* CompileLoadFunctionPrototype(String* name);
 
  private:
-  Object* GetCode(PropertyType);
+  Object* GetCode(PropertyType type, String* name);
 };
 
 
@@ -428,7 +434,7 @@
   Object* CompileStoreInterceptor(JSObject* object, String* name);
 
  private:
-  Object* GetCode(PropertyType type);
+  Object* GetCode(PropertyType type, String* name);
 };
 
 
@@ -440,7 +446,7 @@
                             String* name);
 
  private:
-  Object* GetCode(PropertyType type);
+  Object* GetCode(PropertyType type, String* name);
 };
 
 
@@ -448,7 +454,10 @@
  public:
   explicit CallStubCompiler(int argc) : arguments_(argc) { }
 
-  Object* CompileCallField(Object* object, JSObject* holder, int index);
+  Object* CompileCallField(Object* object,
+                           JSObject* holder,
+                           int index,
+                           String* name);
   Object* CompileCallConstant(Object* object,
                               JSObject* holder,
                               JSFunction* function,
@@ -462,7 +471,7 @@
 
   const ParameterCount& arguments() { return arguments_; }
 
-  Object* GetCode(PropertyType type);
+  Object* GetCode(PropertyType type, String* name);
 };
 
 
diff --git a/src/top.cc b/src/top.cc
index aa0c58e..d6ba49c 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -38,9 +38,6 @@
 
 ThreadLocalTop Top::thread_local_;
 Mutex* Top::break_access_ = OS::CreateMutex();
-StackFrame::Id Top::break_frame_id_;
-int Top::break_count_;
-int Top::break_id_;
 
 NoAllocationStringAllocator* preallocated_message_space = NULL;
 
@@ -222,10 +219,6 @@
 
   InitializeThreadLocal();
 
-  break_frame_id_ = StackFrame::NO_ID;
-  break_count_ = 0;
-  break_id_ = 0;
-
   // Only preallocate on the first initialization.
   if (FLAG_preallocate_message_memory && (preallocated_message_space == NULL)) {
     // Start the thread which will set aside some memory.
@@ -295,44 +288,6 @@
 }
 
 
-void Top::new_break(StackFrame::Id break_frame_id) {
-  ExecutionAccess access;
-  break_frame_id_ = break_frame_id;
-  break_id_ = ++break_count_;
-}
-
-
-void Top::set_break(StackFrame::Id break_frame_id, int break_id) {
-  ExecutionAccess access;
-  break_frame_id_ = break_frame_id;
-  break_id_ = break_id;
-}
-
-
-bool Top::check_break(int break_id) {
-  ExecutionAccess access;
-  return break_id == break_id_;
-}
-
-
-bool Top::is_break() {
-  ExecutionAccess access;
-  return break_id_ != 0;
-}
-
-
-StackFrame::Id Top::break_frame_id() {
-  ExecutionAccess access;
-  return break_frame_id_;
-}
-
-
-int Top::break_id() {
-  ExecutionAccess access;
-  return break_id_;
-}
-
-
 void Top::MarkCompactPrologue(bool is_compacting) {
   MarkCompactPrologue(is_compacting, &thread_local_);
 }
@@ -668,26 +623,6 @@
 }
 
 
-// NOTE: The stack trace frame iterator is an iterator that only
-// traverse proper JavaScript frames; that is JavaScript frames that
-// have proper JavaScript functions. This excludes the problematic
-// functions in runtime.js.
-class StackTraceFrameIterator: public JavaScriptFrameIterator {
- public:
-  StackTraceFrameIterator() {
-    if (!done() && !frame()->function()->IsJSFunction()) Advance();
-  }
-
-  void Advance() {
-    while (true) {
-      JavaScriptFrameIterator::Advance();
-      if (done()) return;
-      if (frame()->function()->IsJSFunction()) return;
-    }
-  }
-};
-
-
 void Top::PrintCurrentStackTrace(FILE* out) {
   StackTraceFrameIterator it;
   while (!it.done()) {
@@ -888,16 +823,36 @@
 
 
 bool Top::optional_reschedule_exception(bool is_bottom_call) {
-  if (!is_out_of_memory() &&
-      (thread_local_.external_caught_exception_ || is_bottom_call)) {
-    thread_local_.external_caught_exception_ = false;
-    clear_pending_exception();
-    return false;
-  } else {
-    thread_local_.scheduled_exception_ = pending_exception();
-    clear_pending_exception();
-    return true;
+  // Always reschedule out-of-memory exceptions.
+  if (!is_out_of_memory()) {
+    // Never reschedule the exception if this is the bottom call.
+    bool clear_exception = is_bottom_call;
+
+    // If the exception is externally caught, clear it if there are no
+    // JavaScript frames on the way to the C++ frame that has the
+    // external handler.
+    if (thread_local_.external_caught_exception_) {
+      ASSERT(thread_local_.try_catch_handler_ != NULL);
+      Address external_handler_address =
+          reinterpret_cast<Address>(thread_local_.try_catch_handler_);
+      JavaScriptFrameIterator it;
+      if (it.done() || (it.frame()->sp() > external_handler_address)) {
+        clear_exception = true;
+      }
+    }
+
+    // Clear the exception if needed.
+    if (clear_exception) {
+      thread_local_.external_caught_exception_ = false;
+      clear_pending_exception();
+      return false;
+    }
   }
+
+  // Reschedule the exception.
+  thread_local_.scheduled_exception_ = pending_exception();
+  clear_pending_exception();
+  return true;
 }
 
 
diff --git a/src/top.h b/src/top.h
index 26151bd..708cfe3 100644
--- a/src/top.h
+++ b/src/top.h
@@ -158,10 +158,12 @@
   }
 
   static void setup_external_caught() {
-    thread_local_.external_caught_exception_ =
-        (!thread_local_.pending_exception_->IsTheHole()) &&
-        (thread_local_.catcher_ != NULL) &&
-        (Top::thread_local_.try_catch_handler_ == Top::thread_local_.catcher_);
+    if (!thread_local_.external_caught_exception_) {
+      thread_local_.external_caught_exception_ =
+          has_pending_exception() &&
+          (thread_local_.catcher_ != NULL) &&
+          (thread_local_.try_catch_handler_ == thread_local_.catcher_);
+    }
   }
 
   // Tells whether the current context has experienced an out of memory
@@ -182,13 +184,6 @@
   // Generated code scratch locations.
   static void* formal_count_address() { return &thread_local_.formal_count_; }
 
-  static void new_break(StackFrame::Id break_frame_id);
-  static void set_break(StackFrame::Id break_frame_id, int break_id);
-  static bool check_break(int break_id);
-  static bool is_break();
-  static StackFrame::Id break_frame_id();
-  static int break_id();
-
   static void MarkCompactPrologue(bool is_compacting);
   static void MarkCompactEpilogue(bool is_compacting);
   static void MarkCompactPrologue(bool is_compacting,
@@ -304,15 +299,6 @@
   // Mutex for serializing access to break control structures.
   static Mutex* break_access_;
 
-  // ID of the frame where execution is stopped by debugger.
-  static StackFrame::Id break_frame_id_;
-
-  // Counter to create unique id for each debug break.
-  static int break_count_;
-
-  // Current debug break, 0 if running.
-  static int break_id_;
-
   friend class SaveContext;
   friend class AssertNoContextChange;
   friend class ExecutionAccess;
@@ -326,12 +312,12 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  SaveContext() :
-      context_(Top::context()),
+  SaveContext()
+      : context_(Top::context()),
 #if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
-      dummy_(Top::context()),
+        dummy_(Top::context()),
 #endif
-      prev_(Top::save_context()) {
+        prev_(Top::save_context()) {
     Top::set_save_context(this);
 
     // If there is no JS frame under the current C frame, use the value 0.
diff --git a/src/uri.js b/src/uri.js
index e47c284..fe659aa 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -90,27 +90,65 @@
 
 
 function URIDecodeOctets(octets, result, index) {
-  if (octets[3]) {
-    var x = (octets[2] >> 4) & 3;
-    var y = octets[2] & 0xF;
-    var z = octets[3] & 63;
-    var v = (((octets[0] & 7) << 2) | ((octets[1] >> 4) & 3)) - 1;
-    var w = octets[1] & 0xF;
-    result[index++] = 55296 | (v << 6) | (w << 2) | x;
-    result[index++] = 56320 | (y << 6) | z;
+  var value;
+  var o0 = octets[0];
+  if (o0 < 0x80) {
+    value = o0;
+  } else if (o0 < 0xc2) {
+    throw new $URIError("URI malformed");
+  } else {
+    var o1 = octets[1];
+    if (o0 < 0xe0) {
+      var a = o0 & 0x1f;
+      if ((o1 < 0x80) || (o1 > 0xbf))
+        throw new $URIError("URI malformed");
+      var b = o1 & 0x3f;
+      value = (a << 6) + b;
+      if (value < 0x80 || value > 0x7ff)
+        throw new $URIError("URI malformed");
+    } else {
+      var o2 = octets[2];
+      if (o0 < 0xf0) {
+        var a = o0 & 0x0f;
+        if ((o1 < 0x80) || (o1 > 0xbf))
+          throw new $URIError("URI malformed");
+        var b = o1 & 0x3f;
+        if ((o2 < 0x80) || (o2 > 0xbf))
+          throw new $URIError("URI malformed");
+        var c = o2 & 0x3f;
+        value = (a << 12) + (b << 6) + c;
+        if ((value < 0x800) || (value > 0xffff))
+          throw new $URIError("URI malformed");
+      } else {
+        var o3 = octets[3];
+        if (o0 < 0xf8) {
+          var a = (o0 & 0x07);
+          if ((o1 < 0x80) || (o1 > 0xbf))
+            throw new $URIError("URI malformed");
+          var b = (o1 & 0x3f);
+          if ((o2 < 0x80) || (o2 > 0xbf))
+            throw new $URIError("URI malformed");
+          var c = (o2 & 0x3f);
+          if ((o3 < 0x80) || (o3 > 0xbf))
+            throw new $URIError("URI malformed");
+          var d = (o3 & 0x3f);
+          value = (a << 18) + (b << 12) + (c << 6) + d;
+          if ((value < 0x10000) || (value > 0x10ffff))
+            throw new $URIError("URI malformed");
+        } else {
+          throw new $URIError("URI malformed");
+        }
+      }
+    }
+  }
+  if (value < 0x10000) {
+    result[index++] = value;
+    return index;
+  } else {
+    result[index++] = (value >> 10) + 0xd7c0;
+    result[index++] = (value & 0x3ff) + 0xdc00;
     return index;
   }
-  if (octets[2]) {
-    var x = octets[0] & 0xF;
-    var y = octets[1] & 63;
-    var z = octets[2] & 63;
-    result[index++] = (x << 12) | (y << 6) | z;
-    return index;
-  }
-  var z = octets[1] & 63;
-  var y = octets[0] & 31;
-  result[index++] = (y << 6) | z;
-  return index;
 }
 
 
diff --git a/src/v8.cc b/src/v8.cc
index cf78c0d..7eb39bc 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -31,6 +31,7 @@
 #include "debug.h"
 #include "serialize.h"
 #include "stub-cache.h"
+#include "oprofile-agent.h"
 
 namespace v8 { namespace internal {
 
@@ -85,6 +86,8 @@
   // objects in place for creating the code object used for probing.
   CPU::Setup();
 
+  OProfileAgent::Initialize();
+
   return true;
 }
 
@@ -93,6 +96,8 @@
   if (HasBeenDisposed()) return;
   if (!HasBeenSetup()) return;
 
+  OProfileAgent::TearDown();
+
   if (FLAG_preemption) {
     v8::Locker locker;
     v8::Locker::StopPreemption();
diff --git a/src/v8.h b/src/v8.h
index 3d84158..4cf0b70 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -92,4 +92,6 @@
 
 } }  // namespace v8::internal
 
+namespace i = v8::internal;
+
 #endif  // V8_V8_H_
diff --git a/src/virtual-frame-arm.cc b/src/virtual-frame-arm.cc
new file mode 100644
index 0000000..d375638
--- /dev/null
+++ b/src/virtual-frame-arm.cc
@@ -0,0 +1,482 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ masm_->
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters.  All initial frame elements are in
+// memory.
+VirtualFrame::VirtualFrame(CodeGenerator* cgen)
+    : cgen_(cgen),
+      masm_(cgen->masm()),
+      elements_(0),
+      parameter_count_(cgen->scope()->num_parameters()),
+      local_count_(0),
+      stack_pointer_(parameter_count_),  // 0-based index of TOS.
+      frame_pointer_(kIllegalIndex) {
+  for (int i = 0; i < parameter_count_ + 1; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+}
+
+
+// Clear the dirty bit for the element at a given index if it is a
+// valid element.  The stack address corresponding to the element must
+// be allocated on the physical stack, or the first element above the
+// stack pointer so it can be allocated by a single push instruction.
+void VirtualFrame::RawSyncElementAt(int index) {
+  FrameElement element = elements_[index];
+
+  if (!element.is_valid() || element.is_synced()) return;
+
+  if (index <= stack_pointer_) {
+    // Emit code to write elements below the stack pointer to their
+    // (already allocated) stack address.
+    switch (element.type()) {
+      case FrameElement::INVALID:  // Fall through.
+      case FrameElement::MEMORY:
+        // There was an early bailout for invalid and synced elements
+        // (memory elements are always synced).
+        UNREACHABLE();
+        break;
+
+      case FrameElement::REGISTER:
+        __ str(element.reg(), MemOperand(fp, fp_relative(index)));
+        break;
+
+      case FrameElement::CONSTANT: {
+        Result temp = cgen_->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(element.handle()));
+        __ str(temp.reg(), MemOperand(fp, fp_relative(index)));
+        break;
+      }
+
+      case FrameElement::COPY: {
+        int backing_index = element.index();
+        FrameElement backing_element = elements_[backing_index];
+        if (backing_element.is_memory()) {
+          Result temp = cgen_->allocator()->Allocate();
+          ASSERT(temp.is_valid());
+          __ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
+          __ str(temp.reg(), MemOperand(fp, fp_relative(index)));
+        } else {
+          ASSERT(backing_element.is_register());
+          __ str(backing_element.reg(), MemOperand(fp, fp_relative(index)));
+        }
+        break;
+      }
+    }
+
+  } else {
+    // Push elements above the stack pointer to allocate space and
+    // sync them.  Space should have already been allocated in the
+    // actual frame for all the elements below this one.
+    ASSERT(index == stack_pointer_ + 1);
+    stack_pointer_++;
+    switch (element.type()) {
+      case FrameElement::INVALID:  // Fall through.
+      case FrameElement::MEMORY:
+        // There was an early bailout for invalid and synced elements
+        // (memory elements are always synced).
+        UNREACHABLE();
+        break;
+
+      case FrameElement::REGISTER:
+        __ push(element.reg());
+        break;
+
+      case FrameElement::CONSTANT: {
+        Result temp = cgen_->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(element.handle()));
+        __ push(temp.reg());
+        break;
+      }
+
+      case FrameElement::COPY: {
+        int backing_index = element.index();
+        FrameElement backing = elements_[backing_index];
+        ASSERT(backing.is_memory() || backing.is_register());
+        if (backing.is_memory()) {
+          Result temp = cgen_->allocator()->Allocate();
+          ASSERT(temp.is_valid());
+          __ ldr(temp.reg(), MemOperand(fp, fp_relative(backing_index)));
+          __ push(temp.reg());
+        } else {
+          __ push(backing.reg());
+        }
+        break;
+      }
+    }
+  }
+
+  elements_[index].set_sync();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  Comment cmnt(masm_, "[ Merge frame");
+  // We should always be merging the code generator's current frame to an
+  // expected frame.
+  ASSERT(cgen_->frame() == this);
+
+  // Adjust the stack pointer upward (toward the top of the virtual
+  // frame) if necessary.
+  if (stack_pointer_ < expected->stack_pointer_) {
+    int difference = expected->stack_pointer_ - stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ sub(sp, sp, Operand(difference * kPointerSize));
+  }
+
+  MergeMoveRegistersToMemory(expected);
+  MergeMoveRegistersToRegisters(expected);
+  MergeMoveMemoryToRegisters(expected);
+
+  // Fix any sync bit problems from the bottom-up, stopping when we
+  // hit the stack pointer or the top of the frame if the stack
+  // pointer is floating above the frame.
+  int limit = Min(stack_pointer_, elements_.length() - 1);
+  for (int i = 0; i <= limit; i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+    if (source.is_synced() && !target.is_synced()) {
+      elements_[i].clear_sync();
+    } else if (!source.is_synced() && target.is_synced()) {
+      SyncElementAt(i);
+    }
+  }
+
+  // Adjust the stack pointer downward if necessary.
+  if (stack_pointer_ > expected->stack_pointer_) {
+    int difference = stack_pointer_ - expected->stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ add(sp, sp, Operand(difference * kPointerSize));
+  }
+
+  // At this point, the frames should be identical.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory.  Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  // On ARM, all elements are in memory.
+
+#ifdef DEBUG
+  int start = Min(stack_pointer_, elements_.length() - 1);
+  for (int i = start; i >= 0; i--) {
+    ASSERT(elements_[i].is_memory());
+    ASSERT(expected->elements_[i].is_memory());
+  }
+#endif
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+}
+
+
+void VirtualFrame::Enter() {
+  Comment cmnt(masm_, "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that r1 contains a JS function.  The following code relies
+  // on r2 being available for use.
+  { Label map_check, done;
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(ne, &map_check);
+    __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
+    __ bind(&map_check);
+    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+    __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+    __ b(eq, &done);
+    __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
+    __ bind(&done);
+  }
+#endif  // DEBUG
+
+  // We are about to push four values to the frame.
+  Adjust(4);
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  // Adjust FP to point to saved FP.
+  frame_pointer_ = elements_.length() - 2;
+  __ add(fp, sp, Operand(2 * kPointerSize));
+  cgen_->allocator()->Unuse(r1);
+  cgen_->allocator()->Unuse(lr);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm_, "[ Exit JS frame");
+  // Drop the execution stack down to the frame pointer and restore the caller
+  // frame pointer and return address.
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void VirtualFrame::AllocateStackSlots(int count) {
+  ASSERT(height() == 0);
+  local_count_ = count;
+  Adjust(count);
+  if (count > 0) {
+    Comment cmnt(masm_, "[ Allocate space for locals");
+    // Initialize stack slots with 'undefined' value.
+    __ mov(ip, Operand(Factory::undefined_value()));
+    for (int i = 0; i < count; i++) {
+      __ push(ip);
+    }
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  UNIMPLEMENTED();
+}
+
+
+// Before changing an element which is copied, adjust so that the
+// first copy becomes the new backing store and all the other copies
+// are updated.  If the original was in memory, the new backing store
+// is allocated to a register.  Return a copy of the new backing store
+// or an invalid element if the original was not a copy.
+FrameElement VirtualFrame::AdjustCopies(int index) {
+  UNIMPLEMENTED();
+  return FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  // Grow the expression stack by handler size less one (the return address
+  // is already pushed by a call instruction).
+  Adjust(kHandlerSize - 1);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallStub(stub);
+  Result result = cgen_->allocator()->Allocate(r0);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f,
+                                 int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallRuntime(f, frame_arg_count);
+  Result result = cgen_->allocator()->Allocate(r0);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
+                                 int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallRuntime(id, frame_arg_count);
+  Result result = cgen_->allocator()->Allocate(r0);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeJSFlags flags,
+                                   Result* arg_count_register,
+                                   int frame_arg_count) {
+  ASSERT(arg_count_register->reg().is(r0));
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  arg_count_register->Unuse();
+  __ InvokeBuiltin(id, flags);
+  Result result = cgen_->allocator()->Allocate(r0);
+  return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ Call(code, rmode);
+  Result result = cgen_->allocator()->Allocate(r0);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+                                    RelocInfo::Mode rmode,
+                                    Result* arg,
+                                    int dropped_args) {
+  int spilled_args = 0;
+  switch (code->kind()) {
+    case Code::LOAD_IC:
+      ASSERT(arg->reg().is(r2));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::KEYED_STORE_IC:
+      ASSERT(arg->reg().is(r0));
+      ASSERT(dropped_args == 0);
+      spilled_args = 2;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly one register.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg->Unuse();
+  return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+                                    RelocInfo::Mode rmode,
+                                    Result* arg0,
+                                    Result* arg1,
+                                    int dropped_args) {
+  int spilled_args = 1;
+  switch (code->kind()) {
+    case Code::STORE_IC:
+      ASSERT(arg0->reg().is(r0));
+      ASSERT(arg1->reg().is(r2));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::BUILTIN:
+      ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+      ASSERT(arg0->reg().is(r0));
+      ASSERT(arg1->reg().is(r1));
+      spilled_args = dropped_args + 1;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly two registers.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(height() >= count);
+  int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ add(sp, sp, Operand(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  UNIMPLEMENTED();
+  Result invalid(cgen_);
+  return invalid;
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/virtual-frame-arm.h b/src/virtual-frame-arm.h
new file mode 100644
index 0000000..99b0a82
--- /dev/null
+++ b/src/virtual-frame-arm.h
@@ -0,0 +1,465 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_ARM_H_
+#define V8_VIRTUAL_FRAME_ARM_H_
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public Malloced {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    explicit SpilledScope(CodeGenerator* cgen);
+
+    ~SpilledScope();
+
+   private:
+    CodeGenerator* cgen_;
+    bool previous_state_;
+  };
+
+  // Construct an initial virtual frame on entry to a JS function.
+  explicit VirtualFrame(CodeGenerator* cgen);
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The height of the virtual expression stack.
+  int height() const {
+    return elements_.length() - expression_base_index();
+  }
+
+  int register_count(Register reg) {
+    return frame_registers_.count(reg);
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget elements from the top of the frame to match an actual frame (eg,
+  // the frame after a runtime call).  No code is emitted.
+  void Forget(int count);
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg);
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (ie, they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator();
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator();
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals and
+  // dropping all non-locals elements in the virtual frame.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots(int count);
+
+  // The current top of the expression stack as an assembly operand.
+  MemOperand Top() const { return MemOperand(sp, 0); }
+
+  // An element of the expression stack as an assembly operand.
+  MemOperand ElementAt(int index) const {
+    return MemOperand(sp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value, cgen_);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(elements_.length() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  MemOperand LocalAt(int index) const {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count_);
+    return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // The function frame slot.
+  MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // The context frame slot.
+  MemOperand Context() const { return MemOperand(fp, kContextOffset); }
+
+  // Save the value of the esi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the esi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  MemOperand ParameterAt(int index) const {
+    // Index -1 corresponds to the receiver.
+    ASSERT(-1 <= index && index <= parameter_count_);
+    return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  MemOperand Receiver() const { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call a code stub, given the number of arguments it expects on (and
+  // removes from) the top of the physical frame.
+  Result CallStub(CodeStub* stub, int frame_arg_count);
+  Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
+  Result CallStub(CodeStub* stub,
+                  Result* arg0,
+                  Result* arg1,
+                  int frame_arg_count);
+
+  // Call the runtime, given the number of arguments expected on (and
+  // removed from) the top of the physical frame.
+  Result CallRuntime(Runtime::Function* f, int frame_arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
+
+  // Invoke a builtin, given the number of arguments it expects on (and
+  // removes from) the top of the physical frame.
+  Result InvokeBuiltin(Builtins::JavaScript id,
+                       InvokeJSFlags flag,
+                       Result* arg_count_register,
+                       int frame_arg_count);
+
+  // Call into a JS code object, given the number of arguments it
+  // removes from the top of the physical frame.
+  // Register arguments are passed as results and consumed by the call.
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        int dropped_args);
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        Result* arg,
+                        int dropped_args);
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        Result* arg0,
+                        Result* arg1,
+                        int dropped_args);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result);
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+
+  CodeGenerator* cgen_;
+  MacroAssembler* masm_;
+
+  List<FrameElement> elements_;
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count_;
+  int local_count_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the sp register).
+  int stack_pointer_;
+
+  // The index of the element that is at the processor's frame pointer
+  // (the fp register).
+  int frame_pointer_;
+
+  // The frame has an embedded register file that it uses to track registers
+  // used in the frame.
+  RegisterFile frame_registers_;
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() const { return 1; }
+
+  // The index of the context slot in the frame.
+  int context_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ - 1;
+  }
+
+  // The index of the function slot in the frame.  It lies above the context
+  // slot.
+  int function_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ - 2;
+  }
+
+  // The index of the first local.  Between the parameters and the locals
+  // lie the return address, the saved frame pointer, the context, and the
+  // function.
+  int local0_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 2;
+  }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() const { return local0_index() + local_count_; }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) const {
+    return (frame_pointer_ - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing both the register's frame-internal reference
+  // count and its external reference count.
+  void Use(Register reg);
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements both the register's internal and external reference counts.
+  void Unuse(Register reg);
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end).
+  void SyncRange(int begin, int end);
+
+  // Sync a single element, assuming that its index is less than
+  // or equal to stack pointer + 1.
+  void RawSyncElementAt(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Helper function to implement the copy-on-write semantics of an
+  // element's copies just before writing to the element.  The copies
+  // are updated, but the element is not changed.  A copy of the new
+  // backing store of all the copies is returned if there were any
+  // copies and an invalid frame element is returned if there were no
+  // copies.
+  FrameElement AdjustCopies(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub, int frame_arg_count);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  friend class JumpTarget;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_VIRTUAL_FRAME_ARM_H_
diff --git a/src/virtual-frame-ia32.cc b/src/virtual-frame-ia32.cc
new file mode 100644
index 0000000..b31c394
--- /dev/null
+++ b/src/virtual-frame-ia32.cc
@@ -0,0 +1,995 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+#define __ masm_->
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame(CodeGenerator* cgen)
+    : cgen_(cgen),
+      masm_(cgen->masm()),
+      elements_(0),
+      parameter_count_(cgen->scope()->num_parameters()),
+      local_count_(0),
+      stack_pointer_(parameter_count_ + 1),  // 0-based index of TOS.
+      frame_pointer_(kIllegalIndex) {
+  for (int i = 0; i < parameter_count_ + 2; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+}
+
+
+// Clear the dirty bit for the element at a given index if it is a
+// valid element.  The stack address corresponding to the element must
+// be allocated on the physical stack, or the first element above the
+// stack pointer so it can be allocated by a single push instruction.
+void VirtualFrame::RawSyncElementAt(int index) {
+  FrameElement element = elements_[index];
+
+  if (!element.is_valid() || element.is_synced()) return;
+
+  if (index <= stack_pointer_) {
+    // Emit code to write elements below the stack pointer to their
+    // (already allocated) stack address.
+    switch (element.type()) {
+      case FrameElement::INVALID:  // Fall through.
+      case FrameElement::MEMORY:
+        // There was an early bailout for invalid and synced elements
+        // (memory elements are always synced).
+        UNREACHABLE();
+        break;
+
+      case FrameElement::REGISTER:
+        __ mov(Operand(ebp, fp_relative(index)), element.reg());
+        break;
+
+      case FrameElement::CONSTANT:
+        if (cgen_->IsUnsafeSmi(element.handle())) {
+          Result temp = cgen_->allocator()->Allocate();
+          ASSERT(temp.is_valid());
+          cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+          __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+        } else {
+          __ Set(Operand(ebp, fp_relative(index)),
+                 Immediate(element.handle()));
+        }
+        break;
+
+      case FrameElement::COPY: {
+        int backing_index = element.index();
+        FrameElement backing_element = elements_[backing_index];
+        if (backing_element.is_memory()) {
+          Result temp = cgen_->allocator()->Allocate();
+          ASSERT(temp.is_valid());
+          __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+          __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+        } else {
+          ASSERT(backing_element.is_register());
+          __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
+        }
+        break;
+      }
+    }
+
+  } else {
+    // Push elements above the stack pointer to allocate space and
+    // sync them.  Space should have already been allocated in the
+    // actual frame for all the elements below this one.
+    ASSERT(index == stack_pointer_ + 1);
+    stack_pointer_++;
+    switch (element.type()) {
+      case FrameElement::INVALID:  // Fall through.
+      case FrameElement::MEMORY:
+        // There was an early bailout for invalid and synced elements
+        // (memory elements are always synced).
+        UNREACHABLE();
+        break;
+
+      case FrameElement::REGISTER:
+        __ push(element.reg());
+        break;
+
+      case FrameElement::CONSTANT:
+        if (cgen_->IsUnsafeSmi(element.handle())) {
+          Result temp = cgen_->allocator()->Allocate();
+          ASSERT(temp.is_valid());
+          cgen_->LoadUnsafeSmi(temp.reg(), element.handle());
+          __ push(temp.reg());
+        } else {
+          __ push(Immediate(element.handle()));
+        }
+        break;
+
+      case FrameElement::COPY: {
+        int backing_index = element.index();
+        FrameElement backing = elements_[backing_index];
+        ASSERT(backing.is_memory() || backing.is_register());
+        if (backing.is_memory()) {
+          __ push(Operand(ebp, fp_relative(backing_index)));
+        } else {
+          __ push(backing.reg());
+        }
+        break;
+      }
+    }
+  }
+
+  elements_[index].set_sync();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+  Comment cmnt(masm_, "[ Merge frame");
+  // We should always be merging the code generator's current frame to an
+  // expected frame.
+  ASSERT(cgen_->frame() == this);
+
+  // Adjust the stack pointer upward (toward the top of the virtual
+  // frame) if necessary.
+  if (stack_pointer_ < expected->stack_pointer_) {
+    int difference = expected->stack_pointer_ - stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ sub(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  MergeMoveRegistersToMemory(expected);
+  MergeMoveRegistersToRegisters(expected);
+  MergeMoveMemoryToRegisters(expected);
+
+  // Fix any sync bit problems from the bottom-up, stopping when we
+  // hit the stack pointer or the top of the frame if the stack
+  // pointer is floating above the frame.
+  int limit = Min(stack_pointer_, elements_.length() - 1);
+  for (int i = 0; i <= limit; i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+    if (source.is_synced() && !target.is_synced()) {
+      elements_[i].clear_sync();
+    } else if (!source.is_synced() && target.is_synced()) {
+      SyncElementAt(i);
+    }
+  }
+
+  // Adjust the stack pointer downward if necessary.
+  if (stack_pointer_ > expected->stack_pointer_) {
+    int difference = stack_pointer_ - expected->stack_pointer_;
+    stack_pointer_ = expected->stack_pointer_;
+    __ add(Operand(esp), Immediate(difference * kPointerSize));
+  }
+
+  // At this point, the frames should be identical.
+  ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Move registers, constants, and copies to memory.  Perform moves
+  // from the top downward in the frame in order to leave the backing
+  // stores of copies in registers.
+  //
+  // Moving memory-backed copies to memory requires a spare register
+  // for the memory-to-memory moves.  Since we are performing a merge,
+  // we use esi (which is already saved in the frame).  We keep track
+  // of the index of the frame element esi is caching or kIllegalIndex
+  // if esi has not been disturbed.
+  int esi_caches = kIllegalIndex;
+  // A "singleton" memory element.
+  FrameElement memory_element = FrameElement::MemoryElement();
+  // Loop downward from the stack pointer or the top of the frame if
+  // the stack pointer is floating above the frame.
+  int start = Min(stack_pointer_, elements_.length() - 1);
+  for (int i = start; i >= 0; i--) {
+    FrameElement target = expected->elements_[i];
+    if (target.is_memory()) {
+      FrameElement source = elements_[i];
+      switch (source.type()) {
+        case FrameElement::INVALID:
+          // Not a legal merge move.
+          UNREACHABLE();
+          break;
+
+        case FrameElement::MEMORY:
+          // Already in place.
+          break;
+
+        case FrameElement::REGISTER:
+          Unuse(source.reg());
+          if (!source.is_synced()) {
+            __ mov(Operand(ebp, fp_relative(i)), source.reg());
+          }
+          break;
+
+        case FrameElement::CONSTANT:
+          if (!source.is_synced()) {
+            if (cgen_->IsUnsafeSmi(source.handle())) {
+              esi_caches = i;
+              cgen_->LoadUnsafeSmi(esi, source.handle());
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
+            }
+          }
+          break;
+
+        case FrameElement::COPY:
+          if (!source.is_synced()) {
+            int backing_index = source.index();
+            FrameElement backing_element = elements_[backing_index];
+            if (backing_element.is_memory()) {
+              // If we have to spill a register, we spill esi.
+              if (esi_caches != backing_index) {
+                esi_caches = backing_index;
+                __ mov(esi, Operand(ebp, fp_relative(backing_index)));
+              }
+              __ mov(Operand(ebp, fp_relative(i)), esi);
+            } else {
+              ASSERT(backing_element.is_register());
+              __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
+            }
+          }
+          break;
+      }
+      elements_[i] = memory_element;
+    }
+  }
+
+  if (esi_caches != kIllegalIndex) {
+    __ mov(esi, Operand(ebp, fp_relative(context_index())));
+  }
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+  // We have already done X-to-memory moves.
+  ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+  // Perform register-to-register moves.
+  int start = 0;
+  int end = elements_.length() - 1;
+  bool any_moves_blocked;  // Did we fail to make some moves this iteration?
+  bool should_break_cycles = false;
+  bool any_moves_made;  // Did we make any progress this iteration?
+  do {
+    any_moves_blocked = false;
+    any_moves_made = false;
+    int first_move_blocked = kIllegalIndex;
+    int last_move_blocked = kIllegalIndex;
+    for (int i = start; i <= end; i++) {
+      FrameElement source = elements_[i];
+      FrameElement target = expected->elements_[i];
+      if (source.is_register() && target.is_register()) {
+        if (target.reg().is(source.reg())) {
+          if (target.is_synced() && !source.is_synced()) {
+            __ mov(Operand(ebp, fp_relative(i)), source.reg());
+          }
+          elements_[i] = target;
+        } else {
+          // We need to move source to target.
+          if (frame_registers_.is_used(target.reg())) {
+            // The move is blocked because the target contains valid data.
+            // If we are stuck with only cycles remaining, then we spill source.
+            // Otherwise, we just need more iterations.
+            if (should_break_cycles) {
+              SpillElementAt(i);
+              should_break_cycles = false;
+            } else {  // Record a blocked move.
+              if (!any_moves_blocked) {
+                first_move_blocked = i;
+              }
+              last_move_blocked = i;
+              any_moves_blocked = true;
+            }
+          } else {
+            // The move is not blocked.  This frame element can be moved from
+            // its source register to its target register.
+            if (target.is_synced() && !source.is_synced()) {
+              SyncElementAt(i);
+            }
+            Use(target.reg());
+            Unuse(source.reg());
+            elements_[i] = target;
+            __ mov(target.reg(), source.reg());
+            any_moves_made = true;
+          }
+        }
+      }
+    }
+    // Update control flags for next iteration.
+    should_break_cycles = (any_moves_blocked && !any_moves_made);
+    if (any_moves_blocked) {
+      start = first_move_blocked;
+      end = last_move_blocked;
+    }
+  } while (any_moves_blocked);
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+  // Move memory, constants, and copies to registers.  This is the
+  // final step and is done from the bottom up so that the backing
+  // elements of copies are in their correct locations when we
+  // encounter the copies.
+  for (int i = 0; i < elements_.length(); i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+    if (target.is_register() && !source.is_register()) {
+      switch (source.type()) {
+        case FrameElement::INVALID:  // Fall through.
+        case FrameElement::REGISTER:
+          UNREACHABLE();
+          break;
+
+        case FrameElement::MEMORY:
+          ASSERT(i <= stack_pointer_);
+          __ mov(target.reg(), Operand(ebp, fp_relative(i)));
+          break;
+
+        case FrameElement::CONSTANT:
+          if (cgen_->IsUnsafeSmi(source.handle())) {
+            cgen_->LoadUnsafeSmi(target.reg(), source.handle());
+          } else {
+           __ Set(target.reg(), Immediate(source.handle()));
+          }
+          break;
+
+        case FrameElement::COPY: {
+          FrameElement backing = elements_[source.index()];
+          ASSERT(backing.is_memory() || backing.is_register());
+          if (backing.is_memory()) {
+            ASSERT(source.index() <= stack_pointer_);
+            __ mov(target.reg(), Operand(ebp, fp_relative(source.index())));
+          } else {
+            __ mov(target.reg(), backing.reg());
+          }
+        }
+      }
+      // Ensure the proper sync state.  If the source was memory no
+      // code needs to be emitted.
+      if (target.is_synced() && !source.is_memory()) {
+        SyncElementAt(i);
+      }
+      Use(target.reg());
+      elements_[i] = target;
+    }
+  }
+}
+
+
+void VirtualFrame::Enter() {
+  // Registers live on entry: esp, ebp, esi, edi.
+  Comment cmnt(masm_, "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that edi contains a JS function.  The following code
+  // relies on eax being available for use.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ Check(not_zero,
+           "VirtualFrame::Enter - edi is not a function (smi check).");
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+  __ Check(equal,
+           "VirtualFrame::Enter - edi is not a function (map check).");
+#endif
+
+  EmitPush(ebp);
+
+  frame_pointer_ = stack_pointer_;
+  __ mov(ebp, Operand(esp));
+
+  // Store the context in the frame.  The context is kept in esi and a
+  // copy is stored in the frame.  The external reference to esi
+  // remains.
+  EmitPush(esi);
+
+  // Store the function in the frame.  The frame owns the register
+  // reference now (ie, it can keep it in edi or spill it later).
+  Push(edi);
+  SyncElementAt(elements_.length() - 1);
+  cgen_->allocator()->Unuse(edi);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm_, "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short. We need the return sequence to be at least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger. See VisitReturnStatement for the full return sequence.
+  __ mov(esp, Operand(ebp));
+  stack_pointer_ = frame_pointer_;
+  for (int i = elements_.length() - 1; i > stack_pointer_; i--) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      Unuse(last.reg());
+    }
+  }
+
+  frame_pointer_ = kIllegalIndex;
+  EmitPop(ebp);
+}
+
+
+void VirtualFrame::AllocateStackSlots(int count) {
+  ASSERT(height() == 0);
+  local_count_ = count;
+
+  if (count > 0) {
+    Comment cmnt(masm_, "[ Allocate space for locals");
+    // The locals are initialized to a constant (the undefined value), but
+    // we sync them with the actual frame to allocate space for spilling
+    // them later.  First sync everything above the stack pointer so we can
+    // use pushes to allocate and initialize the locals.
+    SyncRange(stack_pointer_ + 1, elements_.length());
+    Handle<Object> undefined = Factory::undefined_value();
+    FrameElement initial_value =
+        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
+    Result temp = cgen_->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ Set(temp.reg(), Immediate(undefined));
+    for (int i = 0; i < count; i++) {
+      elements_.Add(initial_value);
+      stack_pointer_++;
+      __ push(temp.reg());
+    }
+  }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(Operand(ebp, fp_relative(context_index())), esi);
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+  ASSERT(elements_[context_index()].is_memory());
+  __ mov(esi, Operand(ebp, fp_relative(context_index())));
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+  Result temp = cgen_->allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ lea(temp.reg(), ParameterAt(-1));
+  Push(&temp);
+}
+
+
+// Before changing an element which is copied, adjust so that the
+// first copy becomes the new backing store and all the other copies
+// are updated.  If the original was in memory, the new backing store
+// is allocated to a register.  Return a copy of the new backing store
+// or an invalid element if the original was not a copy.
+FrameElement VirtualFrame::AdjustCopies(int index) {
+  FrameElement original = elements_[index];
+  ASSERT(original.is_memory() || original.is_register());
+
+  // Go looking for a first copy above index.
+  int i = index + 1;
+  while (i < elements_.length()) {
+    FrameElement elt = elements_[i];
+    if (elt.is_copy() && elt.index() == index) break;
+    i++;
+  }
+
+  if (i < elements_.length()) {
+    // There was a first copy.  Make it the new backing element.
+    Register backing_reg;
+    if (original.is_memory()) {
+      Result fresh = cgen_->allocator()->Allocate();
+      ASSERT(fresh.is_valid());
+      backing_reg = fresh.reg();
+      __ mov(backing_reg, Operand(ebp, fp_relative(index)));
+    } else {
+      // The original was in a register.
+      backing_reg = original.reg();
+    }
+    FrameElement new_backing_element =
+        FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+    if (elements_[i].is_synced()) {
+      new_backing_element.set_sync();
+    }
+    Use(backing_reg);
+    elements_[i] = new_backing_element;
+
+    // Update the other copies.
+    FrameElement copy = CopyElementAt(i);
+    for (int j = i; j < elements_.length(); j++) {
+      FrameElement elt = elements_[j];
+      if (elt.is_copy() && elt.index() == index) {
+        if (elt.is_synced()) {
+          copy.set_sync();
+        } else {
+          copy.clear_sync();
+        }
+        elements_[j] = copy;
+      }
+    }
+
+    copy.clear_sync();
+    return copy;
+  }
+
+  return FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index <= elements_.length());
+  FrameElement original = elements_[index];
+
+  switch (original.type()) {
+    case FrameElement::INVALID:
+      UNREACHABLE();
+      break;
+
+    case FrameElement::MEMORY: {
+      // Allocate the element to a register.  If it is not copied,
+      // push that register on top of the frame.  If it is copied,
+      // make the first copy the backing store and push a fresh copy
+      // on top of the frame.
+      FrameElement copy = AdjustCopies(index);
+      if (copy.is_valid()) {
+        // The original element was a copy.  Push the copy of the new
+        // backing store.
+        elements_.Add(copy);
+      } else {
+        // The element was not a copy.  Move it to a register and push
+        // that.
+        Result fresh = cgen_->allocator()->Allocate();
+        ASSERT(fresh.is_valid());
+        FrameElement new_element =
+            FrameElement::RegisterElement(fresh.reg(),
+                                          FrameElement::NOT_SYNCED);
+        Use(fresh.reg());
+        elements_.Add(new_element);
+        __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
+      }
+      break;
+    }
+
+    case FrameElement::REGISTER: {
+      // If the element is not copied, push it on top of the frame.
+      // If it is copied, make the first copy be the new backing store
+      // and push a fresh copy on top of the frame.
+      FrameElement copy = AdjustCopies(index);
+      if (copy.is_valid()) {
+        // The original element was a copy.  Push the copy of the new
+        // backing store.
+        elements_.Add(copy);
+        // This is the only case where we have to unuse the original
+        // register.  The original is still counted and so is the new
+        // backing store of the copies.
+        Unuse(original.reg());
+      } else {
+        // The element was not a copy.  Push it.
+        original.clear_sync();
+        elements_.Add(original);
+      }
+      break;
+    }
+
+    case FrameElement::CONSTANT:
+      original.clear_sync();
+      elements_.Add(original);
+      break;
+
+    case FrameElement::COPY:
+      original.clear_sync();
+      elements_.Add(original);
+      break;
+  }
+  elements_[index] = FrameElement::InvalidElement();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+  // Store the value on top of the frame to the virtual frame slot at
+  // a given index.  The value on top of the frame is left in place.
+  // This is a duplicating operation, so it can create copies.
+  ASSERT(index >= 0);
+  ASSERT(index < elements_.length());
+
+  FrameElement original = elements_[index];
+  // If the stored-to slot may be copied, adjust to preserve the
+  // copy-on-write semantics of copied elements.
+  if (original.is_register() || original.is_memory()) {
+    FrameElement ignored = AdjustCopies(index);
+  }
+
+  // If the stored-to slot is a register reference, deallocate it.
+  if (original.is_register()) {
+    Unuse(original.reg());
+  }
+
+  int top_index = elements_.length() - 1;
+  FrameElement top = elements_[top_index];
+  ASSERT(top.is_valid());
+
+  if (top.is_copy()) {
+    // There are two cases based on the relative positions of the
+    // stored-to slot and the backing slot of the top element.
+    int backing_index = top.index();
+    ASSERT(backing_index != index);
+    if (backing_index < index) {
+      // 1. The top element is a copy of a slot below the stored-to
+      // slot.  The stored-to slot becomes an unsynced copy of that
+      // same backing slot.
+      elements_[index] = CopyElementAt(backing_index);
+    } else {
+      // 2. The top element is a copy of a slot above the stored-to
+      // slot.  The stored-to slot becomes the new (unsynced) backing
+      // slot and both the top element and the element at the former
+      // backing slot become copies of it.  The sync state of the top
+      // and former backing elements is preserved.
+      FrameElement backing_element = elements_[backing_index];
+      ASSERT(backing_element.is_memory() || backing_element.is_register());
+      if (backing_element.is_memory()) {
+        // Because sets of copies are canonicalized to be backed by
+        // their lowest frame element, and because memory frame
+        // elements are backed by the corresponding stack address, we
+        // have to move the actual value down in the stack.
+        //
+        // TODO(209): consider allocating the stored-to slot to the
+        // temp register.  Alternatively, allow copies to appear in
+        // any order in the frame and lazily move the value down to
+        // the slot.
+        Result temp = cgen_->allocator()->Allocate();
+        ASSERT(temp.is_valid());
+        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
+        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+      } else if (backing_element.is_synced()) {
+        // If the element is a register, we will not actually move
+        // anything on the stack but only update the virtual frame
+        // element.
+        backing_element.clear_sync();
+      }
+      elements_[index] = backing_element;
+
+      // The old backing element becomes a copy of the new backing
+      // element.
+      FrameElement new_element = CopyElementAt(index);
+      elements_[backing_index] = new_element;
+      if (backing_element.is_synced()) {
+        elements_[backing_index].set_sync();
+      }
+
+      // All the copies of the old backing element (including the top
+      // element) become copies of the new backing element.
+      for (int i = backing_index + 1; i < elements_.length(); i++) {
+        FrameElement current = elements_[i];
+        if (current.is_copy() && current.index() == backing_index) {
+          elements_[i] = new_element;
+          if (current.is_synced()) {
+            elements_[i].set_sync();
+          }
+        }
+      }
+    }
+
+    return;
+  }
+
+  // Move the top element to the stored-to slot and replace it (the
+  // top element) with a copy.
+  elements_[index] = top;
+  if (top.is_memory()) {
+    // TODO(209): consider allocating the stored-to slot to the temp
+    // register.  Alternatively, allow copies to appear in any order
+    // in the frame and lazily move the value down to the slot.
+    FrameElement new_top = CopyElementAt(index);
+    new_top.set_sync();
+    elements_[top_index] = new_top;
+
+    // The sync state of the former top element is correct (synced).
+    // Emit code to move the value down in the frame.
+    Result temp = cgen_->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), Operand(esp, 0));
+    __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+  } else if (top.is_register()) {
+    // The stored-to slot has the (unsynced) register reference and
+    // the top element becomes a copy.  The sync state of the top is
+    // preserved.
+    FrameElement new_top = CopyElementAt(index);
+    if (top.is_synced()) {
+      new_top.set_sync();
+      elements_[index].clear_sync();
+    }
+    elements_[top_index] = new_top;
+  } else {
+    // The stored-to slot holds the same value as the top but
+    // unsynced.  (We do not have copies of constants yet.)
+    ASSERT(top.is_constant());
+    elements_[index].clear_sync();
+  }
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  // Grow the expression stack by handler size less two (the return address
+  // is already pushed by a call instruction, and PushTryHandler from the
+  // macro assembler will leave the top of stack in the eax register to be
+  // pushed separately).
+  Adjust(kHandlerSize - 2);
+  __ PushTryHandler(IN_JAVASCRIPT, type);
+  // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
+  EmitPush(eax);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub, int frame_arg_count) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallStub(stub);
+  Result result = cgen_->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f,
+                                 int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallRuntime(f, frame_arg_count);
+  Result result = cgen_->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id,
+                                 int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ CallRuntime(id, frame_arg_count);
+  Result result = cgen_->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ InvokeBuiltin(id, flag);
+  Result result = cgen_->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  __ call(code, rmode);
+  Result result = cgen_->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+                                    RelocInfo::Mode rmode,
+                                    Result* arg,
+                                    int dropped_args) {
+  int spilled_args = 0;
+  switch (code->kind()) {
+    case Code::CALL_IC:
+      ASSERT(arg->reg().is(eax));
+      spilled_args = dropped_args + 1;
+      break;
+    case Code::LOAD_IC:
+      ASSERT(arg->reg().is(ecx));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::KEYED_STORE_IC:
+      ASSERT(arg->reg().is(eax));
+      ASSERT(dropped_args == 0);
+      spilled_args = 2;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly one register.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg->Unuse();
+  return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+                                    RelocInfo::Mode rmode,
+                                    Result* arg0,
+                                    Result* arg1,
+                                    int dropped_args) {
+  int spilled_args = 1;
+  switch (code->kind()) {
+    case Code::STORE_IC:
+      ASSERT(arg0->reg().is(eax));
+      ASSERT(arg1->reg().is(ecx));
+      ASSERT(dropped_args == 0);
+      spilled_args = 1;
+      break;
+    case Code::BUILTIN:
+      ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+      ASSERT(arg0->reg().is(eax));
+      ASSERT(arg1->reg().is(edi));
+      spilled_args = dropped_args + 1;
+      break;
+    default:
+      // No other types of code objects are called with values
+      // in exactly two registers.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+  ASSERT(height() >= count);
+  int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
+}
+
+
+Result VirtualFrame::Pop() {
+  FrameElement element = elements_.RemoveLast();
+  int index = elements_.length();
+  ASSERT(element.is_valid());
+
+  bool pop_needed = (stack_pointer_ == index);
+  if (pop_needed) {
+    stack_pointer_--;
+    if (element.is_memory()) {
+      Result temp = cgen_->allocator()->Allocate();
+      ASSERT(temp.is_valid());
+      __ pop(temp.reg());
+      return temp;
+    }
+
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  ASSERT(!element.is_memory());
+
+  // The top element is a register, constant, or a copy.  Unuse
+  // registers and follow copies to their backing store.
+  if (element.is_register()) {
+    Unuse(element.reg());
+  } else if (element.is_copy()) {
+    ASSERT(element.index() < index);
+    index = element.index();
+    element = elements_[index];
+  }
+  ASSERT(!element.is_copy());
+
+  // The element is memory, a register, or a constant.
+  if (element.is_memory()) {
+    // Memory elements could only be the backing store of a copy.
+    // Allocate the original to a register.
+    ASSERT(index <= stack_pointer_);
+    Result temp = cgen_->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    Use(temp.reg());
+    FrameElement new_element =
+        FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+    elements_[index] = new_element;
+    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
+    return Result(temp.reg(), cgen_);
+  } else if (element.is_register()) {
+    return Result(element.reg(), cgen_);
+  } else {
+    ASSERT(element.is_constant());
+    return Result(element.handle(), cgen_);
+  }
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(Operand operand) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(Operand operand) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(immediate);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/virtual-frame-ia32.h b/src/virtual-frame-ia32.h
new file mode 100644
index 0000000..5b3a320
--- /dev/null
+++ b/src/virtual-frame-ia32.h
@@ -0,0 +1,459 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_IA32_H_
+#define V8_VIRTUAL_FRAME_IA32_H_
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public Malloced {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    explicit SpilledScope(CodeGenerator* cgen);
+
+    ~SpilledScope();
+
+   private:
+    CodeGenerator* cgen_;
+    bool previous_state_;
+  };
+
+  // Construct an initial virtual frame on entry to a JS function.
+  explicit VirtualFrame(CodeGenerator* cgen);
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The height of the virtual expression stack.
+  int height() const {
+    return elements_.length() - expression_base_index();
+  }
+
+  int register_count(Register reg) {
+    return frame_registers_.count(reg);
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget count elements from the top of the frame all in-memory
+  // (including synced) and adjust the stack pointer downward, to
+  // match an external frame effect (examples include a call removing
+  // its arguments, and exiting a try/catch removing an exception
+  // handler).  No code will be emitted.
+  void Forget(int count);
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg);
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (ie, they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator();
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator();
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots(int count);
+
+  // An element of the expression stack as an assembly operand.
+  Operand ElementAt(int index) const {
+    return Operand(esp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value, cgen_);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(elements_.length() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  Operand LocalAt(int index) const {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count_);
+    return Operand(ebp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // Save the value of the esi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the esi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  Operand ParameterAt(int index) const {
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index < parameter_count_);
+    return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  Operand Receiver() const { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call a code stub, given the number of arguments it expects on (and
+  // removes from) the top of the physical frame.
+  Result CallStub(CodeStub* stub, int frame_arg_count);
+  Result CallStub(CodeStub* stub, Result* arg, int frame_arg_count);
+  Result CallStub(CodeStub* stub,
+                  Result* arg0,
+                  Result* arg1,
+                  int frame_arg_count);
+
+  // Call the runtime, given the number of arguments expected on (and
+  // removed from) the top of the physical frame.
+  Result CallRuntime(Runtime::Function* f, int frame_arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int frame_arg_count);
+
+  // Invoke a builtin, given the number of arguments it expects on (and
+  // removes from) the top of the physical frame.
+  Result InvokeBuiltin(Builtins::JavaScript id,
+                       InvokeFlag flag,
+                       int frame_arg_count);
+
+  // Call into a JS code object, given the number of arguments it
+  // removes from the top of the physical frame.
+  // Register arguments are passed as results and consumed by the call.
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        int dropped_args);
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        Result* arg,
+                        int dropped_args);
+  Result CallCodeObject(Handle<Code> ic,
+                        RelocInfo::Mode rmode,
+                        Result* arg0,
+                        Result* arg1,
+                        int dropped_args);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  void EmitPop(Operand operand);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  void EmitPush(Operand operand);
+  void EmitPush(Immediate immediate);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg);
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result);
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+
+  CodeGenerator* cgen_;
+  MacroAssembler* masm_;
+
+  List<FrameElement> elements_;
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count_;
+  int local_count_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the esp register).
+  int stack_pointer_;
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).
+  int frame_pointer_;
+
+  // The frame has an embedded register file that it uses to track registers
+  // used in the frame.
+  RegisterFile frame_registers_;
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() const { return 1; }
+
+  // The index of the context slot in the frame.
+  int context_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 1;
+  }
+
+  // The index of the function slot in the frame.  It lies above the context
+  // slot.
+  int function_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 2;
+  }
+
+  // The index of the first local.  Between the parameters and the locals
+  // lie the return address, the saved frame pointer, the context, and the
+  // function.
+  int local0_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 3;
+  }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() const { return local0_index() + local_count_; }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) const {
+    return (frame_pointer_ - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing both the register's frame-internal reference
+  // count and its external reference count.
+  void Use(Register reg);
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements both the register's internal and external reference counts.
+  void Unuse(Register reg);
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end).
+  void SyncRange(int begin, int end);
+
+  // Sync a single element, assuming that its index is less than
+  // or equal to stack pointer + 1.
+  void RawSyncElementAt(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Helper function to implement the copy-on-write semantics of an
+  // element's copies just before writing to the element.  The copies
+  // are updated, but the element is not changed.  A copy of the new
+  // backing store of all the copies is returned if there were any
+  // copies and an invalid frame element is returned if there were no
+  // copies.
+  FrameElement AdjustCopies(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub, int frame_arg_count);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  friend class JumpTarget;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_VIRTUAL_FRAME_IA32_H_
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
new file mode 100644
index 0000000..60fa699
--- /dev/null
+++ b/src/virtual-frame.cc
@@ -0,0 +1,564 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "codegen-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+VirtualFrame::SpilledScope::SpilledScope(CodeGenerator* cgen)
+    : cgen_(cgen),
+      previous_state_(cgen->in_spilled_code()) {
+  ASSERT(cgen->has_valid_frame());
+  cgen->frame()->SpillAll();
+  cgen->set_in_spilled_code(true);
+}
+
+
+VirtualFrame::SpilledScope::~SpilledScope() {
+  cgen_->set_in_spilled_code(previous_state_);
+}
+
+
+// When cloned, a frame is a deep copy of the original.
+VirtualFrame::VirtualFrame(VirtualFrame* original)
+    : cgen_(original->cgen_),
+      masm_(original->masm_),
+      elements_(original->elements_.length()),
+      parameter_count_(original->parameter_count_),
+      local_count_(original->local_count_),
+      stack_pointer_(original->stack_pointer_),
+      frame_pointer_(original->frame_pointer_),
+      frame_registers_(original->frame_registers_) {
+  // Copy all the elements from the original.
+  for (int i = 0; i < original->elements_.length(); i++) {
+    elements_.Add(original->elements_[i]);
+  }
+}
+
+
+FrameElement VirtualFrame::CopyElementAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index < elements_.length());
+
+  FrameElement target = elements_[index];
+  FrameElement result;
+
+  switch (target.type()) {
+    case FrameElement::CONSTANT:
+      // We do not copy constants and instead return a fresh unsynced
+      // constant.
+      result = FrameElement::ConstantElement(target.handle(),
+                                             FrameElement::NOT_SYNCED);
+      break;
+
+    case FrameElement::COPY:
+      // We do not allow copies of copies, so we follow one link to
+      // the actual backing store of a copy before making a copy.
+      index = target.index();
+      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
+      // Fall through.
+
+    case FrameElement::MEMORY:  // Fall through.
+    case FrameElement::REGISTER:
+      // All copies are backed by memory or register locations.
+      result.type_ =
+          FrameElement::TypeField::encode(FrameElement::COPY) |
+          FrameElement::SyncField::encode(FrameElement::NOT_SYNCED);
+      result.data_.index_ = index;
+      break;
+
+    case FrameElement::INVALID:
+      // We should not try to copy invalid elements.
+      UNREACHABLE();
+      break;
+  }
+  return result;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by adding
+// extra in-memory elements to the top of the virtual frame.  The extra
+// elements will be externally materialized on the actual frame (eg, by
+// pushing an exception handler).  No code is emitted.
+void VirtualFrame::Adjust(int count) {
+  ASSERT(count >= 0);
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+
+  for (int i = 0; i < count; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  stack_pointer_ += count;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by
+// removing elements from the top of the virtual frame.  The elements will
+// be externally popped from the actual frame (eg, by a runtime call).  No
+// code is emitted.
+void VirtualFrame::Forget(int count) {
+  ASSERT(count >= 0);
+  ASSERT(stack_pointer_ == elements_.length() - 1);
+
+  stack_pointer_ -= count;
+  ForgetElements(count);
+}
+
+
+void VirtualFrame::ForgetElements(int count) {
+  ASSERT(count >= 0);
+  ASSERT(elements_.length() >= count);
+
+  for (int i = 0; i < count; i++) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      // A hack to properly count register references for the code
+      // generator's current frame and also for other frames.  The
+      // same code appears in PrepareMergeTo.
+      if (cgen_->frame() == this) {
+        Unuse(last.reg());
+      } else {
+        frame_registers_.Unuse(last.reg());
+      }
+    }
+  }
+}
+
+
+void VirtualFrame::Use(Register reg) {
+  frame_registers_.Use(reg);
+  cgen_->allocator()->Use(reg);
+}
+
+
+void VirtualFrame::Unuse(Register reg) {
+  frame_registers_.Unuse(reg);
+  cgen_->allocator()->Unuse(reg);
+}
+
+
+void VirtualFrame::Spill(Register target) {
+  if (!frame_registers_.is_used(target)) return;
+  for (int i = 0; i < elements_.length(); i++) {
+    if (elements_[i].is_register() && elements_[i].reg().is(target)) {
+      SpillElementAt(i);
+    }
+  }
+}
+
+
+// Spill any register if possible, making its external reference count zero.
+Register VirtualFrame::SpillAnyRegister() {
+  // Find the leftmost (ordered by register code), least
+  // internally-referenced register whose internal reference count matches
+  // its external reference count (so that spilling it from the frame frees
+  // it for use).
+  int min_count = kMaxInt;
+  int best_register_code = no_reg.code_;
+
+  for (int i = 0; i < kNumRegisters; i++) {
+    int count = frame_registers_.count(i);
+    if (count < min_count && count == cgen_->allocator()->count(i)) {
+      min_count = count;
+      best_register_code = i;
+    }
+  }
+
+  Register result = { best_register_code };
+  if (result.is_valid()) {
+    Spill(result);
+    ASSERT(!cgen_->allocator()->is_used(result));
+  }
+  return result;
+}
+
+
+// Make the type of the element at a given index be MEMORY.
+void VirtualFrame::SpillElementAt(int index) {
+  if (!elements_[index].is_valid()) return;
+
+  SyncElementAt(index);
+  if (elements_[index].is_register()) {
+    Unuse(elements_[index].reg());
+  }
+  // The element is now in memory.
+  elements_[index] = FrameElement::MemoryElement();
+}
+
+
+// Clear the dirty bits for the range of elements in [begin, end).
+void VirtualFrame::SyncRange(int begin, int end) {
+  ASSERT(begin >= 0);
+  ASSERT(end <= elements_.length());
+  for (int i = begin; i < end; i++) {
+    RawSyncElementAt(i);
+  }
+}
+
+
+// Clear the dirty bit for the element at a given index.
+void VirtualFrame::SyncElementAt(int index) {
+  if (index > stack_pointer_ + 1) {
+    SyncRange(stack_pointer_ + 1, index);
+  }
+  RawSyncElementAt(index);
+}
+
+
+// Make the type of all elements be MEMORY.
+void VirtualFrame::SpillAll() {
+  for (int i = 0; i < elements_.length(); i++) {
+    SpillElementAt(i);
+  }
+}
+
+
+void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
+  // Perform state changes on this frame that will make merge to the
+  // expected frame simpler or else increase the likelihood that this
+  // frame will match another.
+  for (int i = 0; i < elements_.length(); i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+
+    if (!target.is_valid() ||
+        (target.is_memory() && !source.is_memory() && source.is_synced())) {
+      // No code needs to be generated to invalidate valid elements.
+      // No code needs to be generated to move values to memory if
+      // they are already synced.  We perform those moves here, before
+      // merging.
+      if (source.is_register()) {
+        // If the frame is the code generator's current frame, we have
+        // to decrement both the frame-internal and global register
+        // counts.
+        if (cgen_->frame() == this) {
+          Unuse(source.reg());
+        } else {
+          frame_registers_.Unuse(source.reg());
+        }
+      }
+      elements_[i] = target;
+    } else if (target.is_register() && !target.is_synced() &&
+               !source.is_memory()) {
+      // If an element's target is a register that doesn't need to be
+      // synced, and the element is not in memory, then the sync state
+      // of the element is irrelevant.  We clear the sync bit.
+      ASSERT(source.is_valid());
+      elements_[i].clear_sync();
+    }
+  }
+}
+
+
+void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
+  ASSERT(height() >= dropped_args);
+  ASSERT(height() >= spilled_args);
+  ASSERT(dropped_args <= spilled_args);
+
+  int arg_base_index = elements_.length() - spilled_args;
+  // Spill the arguments.  We spill from the top down so that the
+  // backing stores of register copies will be spilled only after all
+  // the copies are spilled---it is better to spill via a
+  // register-to-memory move than a memory-to-memory move.
+  for (int i = elements_.length() - 1; i >= arg_base_index; i--) {
+    SpillElementAt(i);
+  }
+
+  // Below the arguments, spill registers and sync everything else.
+  // Syncing is necessary for the locals and parameters to give the
+  // debugger a consistent view of the frame.
+  for (int i = arg_base_index - 1; i >= 0; i--) {
+    FrameElement element = elements_[i];
+    if (element.is_register()) {
+      SpillElementAt(i);
+    } else if (element.is_valid()) {
+      SyncElementAt(i);
+    }
+  }
+
+  // Forget the frame elements that will be popped by the call.
+  Forget(dropped_args);
+}
+
+
+void VirtualFrame::DetachFromCodeGenerator() {
+  // Tell the global register allocator that it is free to reallocate all
+  // register references contained in this frame.  The frame elements remain
+  // register references, so the frame-internal reference count is not
+  // decremented.
+  for (int i = 0; i < elements_.length(); i++) {
+    if (elements_[i].is_register()) {
+      cgen_->allocator()->Unuse(elements_[i].reg());
+    }
+  }
+}
+
+
+void VirtualFrame::AttachToCodeGenerator() {
+  // Tell the global register allocator that the frame-internal register
+  // references are live again.
+  for (int i = 0; i < elements_.length(); i++) {
+    if (elements_[i].is_register()) {
+      cgen_->allocator()->Use(elements_[i].reg());
+    }
+  }
+}
+
+
+void VirtualFrame::PrepareForReturn() {
+  // Spill all locals. This is necessary to make sure all locals have
+  // the right value when breaking at the return site in the debugger.
+  //
+  // TODO(203): It is also necessary to ensure that merging at the
+  // return site does not generate code to overwrite eax, where the
+  // return value is kept in a non-refcounted register reference.
+  for (int i = 0; i < expression_base_index(); i++) SpillElementAt(i);
+}
+
+
+void VirtualFrame::SetElementAt(int index, Result* value) {
+  int frame_index = elements_.length() - index - 1;
+  ASSERT(frame_index >= 0);
+  ASSERT(frame_index < elements_.length());
+  ASSERT(value->is_valid());
+  FrameElement original = elements_[frame_index];
+
+  // Early exit if the element is the same as the one being set.
+  bool same_register = original.is_register()
+                    && value->is_register()
+                    && original.reg().is(value->reg());
+  bool same_constant = original.is_constant()
+                    && value->is_constant()
+                    && original.handle().is_identical_to(value->handle());
+  if (same_register || same_constant) {
+    value->Unuse();
+    return;
+  }
+
+  // If the original may be a copy, adjust to preserve the copy-on-write
+  // semantics of copied elements.
+  if (original.is_register() || original.is_memory()) {
+    FrameElement ignored = AdjustCopies(frame_index);
+  }
+
+  // If the original is a register reference, deallocate it.
+  if (original.is_register()) {
+    Unuse(original.reg());
+  }
+
+  FrameElement new_element;
+  if (value->is_register()) {
+    // There are two cases depending on whether the register already
+    // occurs in the frame or not.
+    if (register_count(value->reg()) == 0) {
+      Use(value->reg());
+      elements_[frame_index] =
+          FrameElement::RegisterElement(value->reg(),
+                                        FrameElement::NOT_SYNCED);
+    } else {
+      for (int i = 0; i < elements_.length(); i++) {
+        FrameElement element = elements_[i];
+        if (element.is_register() && element.reg().is(value->reg())) {
+          // The register backing store is lower in the frame than its
+          // copy.
+          if (i < frame_index) {
+            elements_[frame_index] = CopyElementAt(i);
+          } else {
+            // There was an early bailout for the case of setting a
+            // register element to itself.
+            ASSERT(i != frame_index);
+            element.clear_sync();
+            elements_[frame_index] = element;
+            elements_[i] = CopyElementAt(frame_index);
+          }
+          // Exit the loop once the appropriate copy is inserted.
+          break;
+        }
+      }
+    }
+  } else {
+    ASSERT(value->is_constant());
+    elements_[frame_index] =
+        FrameElement::ConstantElement(value->handle(),
+                                      FrameElement::NOT_SYNCED);
+  }
+  value->Unuse();
+}
+
+
+void VirtualFrame::PushFrameSlotAt(int index) {
+  FrameElement new_element = CopyElementAt(index);
+  elements_.Add(new_element);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub,
+                              Result* arg,
+                              int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  arg->Unuse();
+  return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub,
+                              Result* arg0,
+                              Result* arg1,
+                              int frame_arg_count) {
+  PrepareForCall(frame_arg_count, frame_arg_count);
+  arg0->Unuse();
+  arg1->Unuse();
+  return RawCallStub(stub, frame_arg_count);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+                                    RelocInfo::Mode rmode,
+                                    int dropped_args) {
+  int spilled_args = 0;
+  switch (code->kind()) {
+    case Code::CALL_IC:
+      spilled_args = dropped_args + 1;
+      break;
+    case Code::FUNCTION:
+      spilled_args = dropped_args + 1;
+      break;
+    case Code::KEYED_LOAD_IC:
+      ASSERT(dropped_args == 0);
+      spilled_args = 2;
+      break;
+    default:
+      // The other types of code objects are called with values
+      // in specific registers, and are handled in functions with
+      // a different signature.
+      UNREACHABLE();
+      break;
+  }
+  PrepareForCall(spilled_args, dropped_args);
+  return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Push(Register reg) {
+  FrameElement new_element;
+  if (register_count(reg) == 0) {
+    Use(reg);
+    new_element =
+        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED);
+  } else {
+    for (int i = 0; i < elements_.length(); i++) {
+      FrameElement element = elements_[i];
+      if (element.is_register() && element.reg().is(reg)) {
+        new_element = CopyElementAt(i);
+        break;
+      }
+    }
+  }
+  elements_.Add(new_element);
+}
+
+
+void VirtualFrame::Push(Handle<Object> value) {
+  elements_.Add(FrameElement::ConstantElement(value,
+                                              FrameElement::NOT_SYNCED));
+}
+
+
+void VirtualFrame::Push(Result* result) {
+  if (result->is_register()) {
+    Push(result->reg());
+  } else {
+    ASSERT(result->is_constant());
+    Push(result->handle());
+  }
+  result->Unuse();
+}
+
+
+void VirtualFrame::Nip(int num_dropped) {
+  ASSERT(num_dropped >= 0);
+  if (num_dropped == 0) return;
+  Result tos = Pop();
+  if (num_dropped > 1) {
+    Drop(num_dropped - 1);
+  }
+  SetElementAt(0, &tos);
+}
+
+
+bool FrameElement::Equals(FrameElement other) {
+  if (type() != other.type()) return false;
+  if (is_synced() != other.is_synced()) return false;
+
+  if (is_register()) {
+    if (!reg().is(other.reg())) return false;
+  } else if (is_constant()) {
+    if (!handle().is_identical_to(other.handle())) return false;
+  } else if (is_copy()) {
+    if (index() != other.index()) return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualFrame::Equals(VirtualFrame* other) {
+  if (cgen_ != other->cgen_) return false;
+  if (masm_ != other->masm_) return false;
+  if (elements_.length() != other->elements_.length()) return false;
+
+  for (int i = 0; i < elements_.length(); i++) {
+    if (!elements_[i].Equals(other->elements_[i])) return false;
+  }
+
+  if (parameter_count_ != other->parameter_count_) return false;
+  if (local_count_ != other->local_count_) return false;
+  if (stack_pointer_ != other->stack_pointer_) return false;
+  if (frame_pointer_ != other->frame_pointer_) return false;
+
+  for (int i = 0; i < kNumRegisters; i++) {
+    if (frame_registers_.count(i) != other->frame_registers_.count(i)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+} }  // namespace v8::internal
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
new file mode 100644
index 0000000..4702ed4
--- /dev/null
+++ b/src/virtual-frame.h
@@ -0,0 +1,167 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_H_
+#define V8_VIRTUAL_FRAME_H_
+
+#include "macro-assembler.h"
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frame elements
+//
+// The internal elements of the virtual frames.  There are several kinds of
+// elements:
+//   * Invalid: elements that are uninitialized or not actually part
+//     of the virtual frame.  They should not be read.
+//   * Memory: an element that resides in the actual frame.  Its address is
+//     given by its position in the virtual frame.
+//   * Register: an element that resides in a register.
+//   * Constant: an element whose value is known at compile time.
+
+class FrameElement BASE_EMBEDDED {
+ public:
+  enum SyncFlag {
+    SYNCED,
+    NOT_SYNCED
+  };
+
+  // The default constructor creates an invalid frame element.
+  FrameElement() {
+    type_ = TypeField::encode(INVALID) | SyncField::encode(NOT_SYNCED);
+    data_.reg_ = no_reg;
+  }
+
+  // Factory function to construct an invalid frame element.
+  static FrameElement InvalidElement() {
+    FrameElement result;
+    return result;
+  }
+
+  // Factory function to construct an in-memory frame element.
+  static FrameElement MemoryElement() {
+    FrameElement result;
+    result.type_ = TypeField::encode(MEMORY) | SyncField::encode(SYNCED);
+    // In-memory elements have no useful data.
+    result.data_.reg_ = no_reg;
+    return result;
+  }
+
+  // Factory function to construct an in-register frame element.
+  static FrameElement RegisterElement(Register reg, SyncFlag is_synced) {
+    FrameElement result;
+    result.type_ = TypeField::encode(REGISTER) | SyncField::encode(is_synced);
+    result.data_.reg_ = reg;
+    return result;
+  }
+
+  // Factory function to construct a frame element whose value is known at
+  // compile time.
+  static FrameElement ConstantElement(Handle<Object> value,
+                                      SyncFlag is_synced) {
+    FrameElement result;
+    result.type_ = TypeField::encode(CONSTANT) | SyncField::encode(is_synced);
+    result.data_.handle_ = value.location();
+    return result;
+  }
+
+  bool is_synced() const { return SyncField::decode(type_) == SYNCED; }
+
+  void set_sync() {
+    ASSERT(type() != MEMORY);
+    type_ = (type_ & ~SyncField::mask()) | SyncField::encode(SYNCED);
+  }
+
+  void clear_sync() {
+    ASSERT(type() != MEMORY);
+    type_ = (type_ & ~SyncField::mask()) | SyncField::encode(NOT_SYNCED);
+  }
+
+  bool is_valid() const { return type() != INVALID; }
+  bool is_memory() const { return type() == MEMORY; }
+  bool is_register() const { return type() == REGISTER; }
+  bool is_constant() const { return type() == CONSTANT; }
+  bool is_copy() const { return type() == COPY; }
+
+  Register reg() const {
+    ASSERT(is_register());
+    return data_.reg_;
+  }
+
+  Handle<Object> handle() const {
+    ASSERT(is_constant());
+    return Handle<Object>(data_.handle_);
+  }
+
+  int index() const {
+    ASSERT(is_copy());
+    return data_.index_;
+  }
+
+  bool Equals(FrameElement other);
+
+ private:
+  enum Type {
+    INVALID,
+    MEMORY,
+    REGISTER,
+    CONSTANT,
+    COPY
+  };
+
+  // BitField is <type, shift, size>.
+  class SyncField : public BitField<SyncFlag, 0, 1> {};
+  class TypeField : public BitField<Type, 1, 32 - 1> {};
+
+  Type type() const { return TypeField::decode(type_); }
+
+  // The element's type and a dirty bit.  The dirty bit can be cleared
+  // for non-memory elements to indicate that the element agrees with
+  // the value in memory in the actual frame.
+  int type_;
+
+  union {
+    Register reg_;
+    Object** handle_;
+    int index_;
+  } data_;
+
+  friend class VirtualFrame;
+};
+
+
+} }  // namespace v8::internal
+
+#ifdef ARM
+#include "virtual-frame-arm.h"
+#else  // ia32
+#include "virtual-frame-ia32.h"
+#endif
+
+#endif  // V8_VIRTUAL_FRAME_H_
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index c2b6748..091768f 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -34,15 +34,33 @@
 
 SOURCES = {
   'all': [
-    'test-hashmap.cc', 'test-debug.cc', 'test-api.cc', 'test-flags.cc',
-    'test-ast.cc', 'test-heap.cc', 'test-utils.cc', 'test-compiler.cc',
-    'test-spaces.cc', 'test-mark-compact.cc', 'test-lock.cc',
-    'test-conversions.cc', 'test-strings.cc', 'test-serialize.cc',
-    'test-decls.cc', 'test-alloc.cc', 'test-regexp.cc', 'test-threads.cc',
-    'test-sockets.cc'
+    'test-alloc.cc',
+    'test-api.cc',
+    'test-ast.cc',
+    'test-compiler.cc',
+    'test-conversions.cc',
+    'test-debug.cc',
+    'test-decls.cc',
+    'test-flags.cc',
+    'test-hashmap.cc',
+    'test-heap.cc',
+    'test-list.cc',
+    'test-lock.cc',
+    'test-mark-compact.cc',
+    'test-regexp.cc',
+    'test-serialize.cc',
+    'test-sockets.cc',
+    'test-spaces.cc',
+    'test-strings.cc',
+    'test-threads.cc',
+    'test-utils.cc'
   ],
   'arch:arm':  ['test-assembler-arm.cc', 'test-disasm-arm.cc'],
-  'arch:ia32': ['test-assembler-ia32.cc', 'test-disasm-ia32.cc', 'test-log-ia32.cc'],
+  'arch:ia32': [
+    'test-assembler-ia32.cc',
+    'test-disasm-ia32.cc',
+    'test-log-ia32.cc'
+  ],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index aa81dd1..9ff11ba 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -39,3 +39,11 @@
 
 # BUG(240): Test seems flaky on ARM.
 test-api/RegExpInterruption: SKIP
+
+# BUG(271): After exception propagation changes that compares pointers
+# into the stack, these tests fail on the arm simulator (but pass on
+# the arm hardware) because the JS stack is not combined with the C
+# stack in the simulator.  Disabling while we consider how to solve
+# the issue for the simulator.
+test-api/ExceptionOrder: PASS || FAIL
+test-api/TryCatchInTryFinally: PASS || FAIL
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6868b81..c6df1ac 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1722,7 +1722,8 @@
   if (args.Length() < 1) return v8::Boolean::New(false);
   v8::HandleScope scope;
   v8::TryCatch try_catch;
-  v8::Script::Compile(args[0]->ToString())->Run();
+  Local<Value> result = v8::Script::Compile(args[0]->ToString())->Run();
+  CHECK(!try_catch.HasCaught() || result.IsEmpty());
   return v8::Boolean::New(try_catch.HasCaught());
 }
 
@@ -1759,7 +1760,11 @@
 
 // Test that a try-finally block doesn't shadow a try-catch block
 // when setting up an external handler.
-THREADED_TEST(TryCatchInTryFinally) {
+//
+// TODO(271): This should be a threaded test. It was disabled for the
+// thread tests because it fails on the ARM simulator.  Should be made
+// threadable again when the simulator issue is resolved.
+TEST(TryCatchInTryFinally) {
   v8::HandleScope scope;
   Local<ObjectTemplate> templ = ObjectTemplate::New();
   templ->Set(v8_str("CCatcher"),
@@ -1806,8 +1811,9 @@
   LocalContext context(0, templ);
   v8::TryCatch try_catch;
   try_catch.SetVerbose(true);
-  CompileRun("ThrowFromC();");
+  Local<Value> result = CompileRun("ThrowFromC();");
   CHECK(try_catch.HasCaught());
+  CHECK(result.IsEmpty());
   CHECK(message_received);
   v8::V8::RemoveMessageListeners(check_message);
 }
@@ -1853,6 +1859,7 @@
       int expected = args[3]->Int32Value();
       if (try_catch.HasCaught()) {
         CHECK_EQ(expected, count);
+        CHECK(result.IsEmpty());
         CHECK(!i::Top::has_scheduled_exception());
       } else {
         CHECK_NE(expected, count);
@@ -1910,7 +1917,11 @@
 // Each entry is an activation, either JS or C.  The index is the count at that
 // level.  Stars identify activations with exception handlers, the @ identifies
 // the exception handler that should catch the exception.
-THREADED_TEST(ExceptionOrder) {
+//
+// TODO(271): This should be a threaded test. It was disabled for the
+// thread tests because it fails on the ARM simulator.  Should be made
+// threadable again when the simulator issue is resolved.
+TEST(ExceptionOrder) {
   v8::HandleScope scope;
   Local<ObjectTemplate> templ = ObjectTemplate::New();
   templ->Set(v8_str("check"), v8::FunctionTemplate::New(JSCheck));
@@ -5320,6 +5331,28 @@
 }
 
 
+static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
+                                    const char* resource_name,
+                                    int line_offset) {
+  v8::HandleScope scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> result = script->Run();
+  CHECK(result.IsEmpty());
+  CHECK(try_catch.HasCaught());
+  v8::Handle<v8::Message> message = try_catch.Message();
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(10 + line_offset, message->GetLineNumber());
+  CHECK_EQ(91, message->GetStartPosition());
+  CHECK_EQ(92, message->GetEndPosition());
+  CHECK_EQ(2, message->GetStartColumn());
+  CHECK_EQ(3, message->GetEndColumn());
+  v8::String::AsciiValue line(message->GetSourceLine());
+  CHECK_EQ("  throw 'nirk';", *line);
+  v8::String::AsciiValue name(message->GetScriptResourceName());
+  CHECK_EQ(resource_name, *name);
+}
+
+
 THREADED_TEST(TryCatchSourceInfo) {
   v8::HandleScope scope;
   LocalContext context;
@@ -5337,23 +5370,22 @@
       "}\n"
       "\n"
       "Foo();\n");
-  v8::Handle<v8::Script> script =
-      v8::Script::Compile(source, v8::String::New("test.js"));
-  v8::TryCatch try_catch;
-  v8::Handle<v8::Value> result = script->Run();
-  CHECK(result.IsEmpty());
-  CHECK(try_catch.HasCaught());
-  v8::Handle<v8::Message> message = try_catch.Message();
-  CHECK(!message.IsEmpty());
-  CHECK_EQ(10, message->GetLineNumber());
-  CHECK_EQ(91, message->GetStartPosition());
-  CHECK_EQ(92, message->GetEndPosition());
-  CHECK_EQ(2, message->GetStartColumn());
-  CHECK_EQ(3, message->GetEndColumn());
-  v8::String::AsciiValue line(message->GetSourceLine());
-  CHECK_EQ("  throw 'nirk';", *line);
-  v8::String::AsciiValue name(message->GetScriptResourceName());
-  CHECK_EQ("test.js", *name);
+
+  const char* resource_name;
+  v8::Handle<v8::Script> script;
+  resource_name = "test.js";
+  script = v8::Script::Compile(source, v8::String::New(resource_name));
+  CheckTryCatchSourceInfo(script, resource_name, 0);
+
+  resource_name = "test1.js";
+  v8::ScriptOrigin origin1(v8::String::New(resource_name));
+  script = v8::Script::Compile(source, &origin1);
+  CheckTryCatchSourceInfo(script, resource_name, 0);
+
+  resource_name = "test2.js";
+  v8::ScriptOrigin origin2(v8::String::New(resource_name), v8::Integer::New(7));
+  script = v8::Script::Compile(source, &origin2);
+  CheckTryCatchSourceInfo(script, resource_name, 7);
 }
 
 
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index b4da9d5..08037b3 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -312,7 +312,7 @@
 
   Handle<Script> script = Factory::NewScript(Factory::empty_string());
   script->set_source(Heap::undefined_value());
-  CHECK_EQ(-1, script->GetLineNumber(0));
-  CHECK_EQ(-1, script->GetLineNumber(100));
-  CHECK_EQ(-1, script->GetLineNumber(-1));
+  CHECK_EQ(-1, GetScriptLineNumber(script, 0));
+  CHECK_EQ(-1, GetScriptLineNumber(script, 100));
+  CHECK_EQ(-1, GetScriptLineNumber(script, -1));
 }
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index f8a2dd0..280d507 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -210,20 +210,46 @@
 }
 
 
-// Set a break point in a script using the global Debug object.
-static int SetScriptBreakPointFromJS(const char* script_data,
-                                     int line, int column) {
+// Set a break point in a script identified by id using the global Debug object.
+static int SetScriptBreakPointByIdFromJS(int script_id, int line, int column) {
   EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
   if (column >= 0) {
     // Column specified set script break point on precise location.
     OS::SNPrintF(buffer,
-                 "debug.Debug.setScriptBreakPoint(\"%s\",%d,%d)",
-                 script_data, line, column);
+                 "debug.Debug.setScriptBreakPointById(%d,%d,%d)",
+                 script_id, line, column);
   } else {
     // Column not specified set script break point on line.
     OS::SNPrintF(buffer,
-                 "debug.Debug.setScriptBreakPoint(\"%s\",%d)",
-                 script_data, line);
+                 "debug.Debug.setScriptBreakPointById(%d,%d)",
+                 script_id, line);
+  }
+  buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
+  {
+    v8::TryCatch try_catch;
+    v8::Handle<v8::String> str = v8::String::New(buffer.start());
+    v8::Handle<v8::Value> value = v8::Script::Compile(str)->Run();
+    ASSERT(!try_catch.HasCaught());
+    return value->Int32Value();
+  }
+}
+
+
+// Set a break point in a script identified by name using the global Debug
+// object.
+static int SetScriptBreakPointByNameFromJS(const char* script_name,
+                                           int line, int column) {
+  EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
+  if (column >= 0) {
+    // Column specified set script break point on precise location.
+    OS::SNPrintF(buffer,
+                 "debug.Debug.setScriptBreakPointByName(\"%s\",%d,%d)",
+                 script_name, line, column);
+  } else {
+    // Column not specified set script break point on line.
+    OS::SNPrintF(buffer,
+                 "debug.Debug.setScriptBreakPointByName(\"%s\",%d)",
+                 script_name, line);
   }
   buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
   {
@@ -511,7 +537,7 @@
                                          v8::Handle<v8::Object> event_data,
                                          v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   // Count the number of breaks.
   if (event == v8::Break) {
@@ -551,7 +577,7 @@
                               v8::Handle<v8::Object> event_data,
                               v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   // Count the number of breaks.
   if (event == v8::Break) {
@@ -609,7 +635,7 @@
                                v8::Handle<v8::Object> event_data,
                                v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   if (event == v8::Break) {
     for (int i = 0; checks[i].expr != NULL; i++) {
@@ -635,7 +661,7 @@
                                        v8::Handle<v8::Object> event_data,
                                        v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   if (event == v8::Break) {
     break_point_hit_count++;
@@ -653,7 +679,7 @@
                            v8::Handle<v8::Object> event_data,
                            v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   if (event == v8::Break) {
     break_point_hit_count++;
@@ -679,7 +705,7 @@
                                    v8::Handle<v8::Object> event_data,
                                    v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   if (event == v8::Break || event == v8::Exception) {
     // Check that the current function is the expected.
@@ -709,7 +735,7 @@
     v8::Handle<v8::Object> event_data,
     v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   // Perform a garbage collection when break point is hit and continue. Based
   // on the number of break points hit either scavenge or mark compact
@@ -734,7 +760,7 @@
                             v8::Handle<v8::Object> event_data,
                             v8::Handle<v8::Value> data) {
   // When hitting a debug event listener there must be a break set.
-  CHECK(v8::internal::Top::is_break());
+  CHECK_NE(v8::internal::Debug::break_id(), 0);
 
   if (event == v8::Break) {
     // Count the number of breaks.
@@ -1166,8 +1192,9 @@
 }
 
 
-// Test that break points can be set using the global Debug object.
-TEST(ScriptBreakPointThroughJavaScript) {
+// Test that break points on scripts identified by name can be set using the
+// global Debug object.
+TEST(ScriptBreakPointByNameThroughJavaScript) {
   break_point_hit_count = 0;
   v8::HandleScope scope;
   DebugLocalContext env;
@@ -1175,7 +1202,6 @@
 
   v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
                                    v8::Undefined());
-  v8::Script::Compile(v8::String::New("function foo(){bar();bar();}"))->Run();
 
   v8::Local<v8::String> script = v8::String::New(
     "function f() {\n"
@@ -1213,7 +1239,7 @@
   CHECK_EQ(0, break_point_hit_count);
 
   // Call f and g with break point on line 12.
-  int sbp1 = SetScriptBreakPointFromJS("test", 12, 0);
+  int sbp1 = SetScriptBreakPointByNameFromJS("test", 12, 0);
   break_point_hit_count = 0;
   f->Call(env->Global(), 0, NULL);
   CHECK_EQ(0, break_point_hit_count);
@@ -1229,7 +1255,7 @@
   CHECK_EQ(0, break_point_hit_count);
 
   // Call f and g with break point on line 2.
-  int sbp2 = SetScriptBreakPointFromJS("test", 2, 0);
+  int sbp2 = SetScriptBreakPointByNameFromJS("test", 2, 0);
   break_point_hit_count = 0;
   f->Call(env->Global(), 0, NULL);
   CHECK_EQ(1, break_point_hit_count);
@@ -1237,17 +1263,17 @@
   CHECK_EQ(2, break_point_hit_count);
 
   // Call f and g with break point on line 2, 4, 12, 14 and 15.
-  int sbp3 = SetScriptBreakPointFromJS("test", 4, 0);
-  int sbp4 = SetScriptBreakPointFromJS("test", 12, 0);
-  int sbp5 = SetScriptBreakPointFromJS("test", 14, 0);
-  int sbp6 = SetScriptBreakPointFromJS("test", 15, 0);
+  int sbp3 = SetScriptBreakPointByNameFromJS("test", 4, 0);
+  int sbp4 = SetScriptBreakPointByNameFromJS("test", 12, 0);
+  int sbp5 = SetScriptBreakPointByNameFromJS("test", 14, 0);
+  int sbp6 = SetScriptBreakPointByNameFromJS("test", 15, 0);
   break_point_hit_count = 0;
   f->Call(env->Global(), 0, NULL);
   CHECK_EQ(2, break_point_hit_count);
   g->Call(env->Global(), 0, NULL);
   CHECK_EQ(7, break_point_hit_count);
 
-  // Remove the all the break points again.
+  // Remove all the break points again.
   break_point_hit_count = 0;
   ClearBreakPointFromJS(sbp2);
   ClearBreakPointFromJS(sbp3);
@@ -1259,19 +1285,114 @@
   g->Call(env->Global(), 0, NULL);
   CHECK_EQ(0, break_point_hit_count);
 
-  // Now set a function break point
-  int bp7 = SetBreakPointFromJS("g", 0, 0);
+  v8::Debug::SetDebugEventListener(NULL);
+  CheckDebuggerUnloaded();
+
+  // Make sure that the break point numbers are consecutive.
+  CHECK_EQ(1, sbp1);
+  CHECK_EQ(2, sbp2);
+  CHECK_EQ(3, sbp3);
+  CHECK_EQ(4, sbp4);
+  CHECK_EQ(5, sbp5);
+  CHECK_EQ(6, sbp6);
+}
+
+
+TEST(ScriptBreakPointByIdThroughJavaScript) {
+  break_point_hit_count = 0;
+  v8::HandleScope scope;
+  DebugLocalContext env;
+  env.ExposeDebug();
+
+  v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+                                   v8::Undefined());
+
+  v8::Local<v8::String> source = v8::String::New(
+    "function f() {\n"
+    "  function h() {\n"
+    "    a = 0;  // line 2\n"
+    "  }\n"
+    "  b = 1;  // line 4\n"
+    "  return h();\n"
+    "}\n"
+    "\n"
+    "function g() {\n"
+    "  function h() {\n"
+    "    a = 0;\n"
+    "  }\n"
+    "  b = 2;  // line 12\n"
+    "  h();\n"
+    "  b = 3;  // line 14\n"
+    "  f();    // line 15\n"
+    "}");
+
+  // Compile the script and get the two functions.
+  v8::ScriptOrigin origin =
+      v8::ScriptOrigin(v8::String::New("test"));
+  v8::Local<v8::Script> script = v8::Script::Compile(source, &origin);
+  script->Run();
+  v8::Local<v8::Function> f =
+      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+  v8::Local<v8::Function> g =
+      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+
+  // Get the script id knowing that internally it is a 32 bit integer.
+  uint32_t script_id = script->Id()->Uint32Value();
+
+  // Call f and g without break points.
+  break_point_hit_count = 0;
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+  g->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+
+  // Call f and g with break point on line 12.
+  int sbp1 = SetScriptBreakPointByIdFromJS(script_id, 12, 0);
+  break_point_hit_count = 0;
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
   g->Call(env->Global(), 0, NULL);
   CHECK_EQ(1, break_point_hit_count);
 
-  // Reload the script and get g again checking that the break point survives.
-  // This tests that the function break point was converted to a script break
-  // point.
-  v8::Script::Compile(script, &origin)->Run();
-  g = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+  // Remove the break point again.
+  break_point_hit_count = 0;
+  ClearBreakPointFromJS(sbp1);
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+  g->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+
+  // Call f and g with break point on line 2.
+  int sbp2 = SetScriptBreakPointByIdFromJS(script_id, 2, 0);
+  break_point_hit_count = 0;
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(1, break_point_hit_count);
   g->Call(env->Global(), 0, NULL);
   CHECK_EQ(2, break_point_hit_count);
 
+  // Call f and g with break point on line 2, 4, 12, 14 and 15.
+  int sbp3 = SetScriptBreakPointByIdFromJS(script_id, 4, 0);
+  int sbp4 = SetScriptBreakPointByIdFromJS(script_id, 12, 0);
+  int sbp5 = SetScriptBreakPointByIdFromJS(script_id, 14, 0);
+  int sbp6 = SetScriptBreakPointByIdFromJS(script_id, 15, 0);
+  break_point_hit_count = 0;
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(2, break_point_hit_count);
+  g->Call(env->Global(), 0, NULL);
+  CHECK_EQ(7, break_point_hit_count);
+
+  // Remove all the break points again.
+  break_point_hit_count = 0;
+  ClearBreakPointFromJS(sbp2);
+  ClearBreakPointFromJS(sbp3);
+  ClearBreakPointFromJS(sbp4);
+  ClearBreakPointFromJS(sbp5);
+  ClearBreakPointFromJS(sbp6);
+  f->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+  g->Call(env->Global(), 0, NULL);
+  CHECK_EQ(0, break_point_hit_count);
+
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
 
@@ -1282,7 +1403,6 @@
   CHECK_EQ(4, sbp4);
   CHECK_EQ(5, sbp5);
   CHECK_EQ(6, sbp6);
-  CHECK_EQ(7, bp7);
 }
 
 
@@ -1309,7 +1429,7 @@
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
   // Set script break point on line 1 (in function f).
-  int sbp = SetScriptBreakPointFromJS("test", 1, 0);
+  int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
 
   // Call f while enabeling and disabling the script break point.
   break_point_hit_count = 0;
@@ -1370,7 +1490,7 @@
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
   // Set script break point on line 5 (in function g).
-  int sbp1 = SetScriptBreakPointFromJS("test", 5, 0);
+  int sbp1 = SetScriptBreakPointByNameFromJS("test", 5, 0);
 
   // Call f with different conditions on the script break point.
   break_point_hit_count = 0;
@@ -1428,7 +1548,7 @@
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
   // Set script break point on line 1 (in function f).
-  int sbp = SetScriptBreakPointFromJS("test", 1, 0);
+  int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
 
   // Call f with different ignores on the script break point.
   break_point_hit_count = 0;
@@ -1484,7 +1604,7 @@
   v8::ScriptOrigin origin_2 = v8::ScriptOrigin(v8::String::New("2"));
 
   // Set a script break point before the script is loaded.
-  SetScriptBreakPointFromJS("1", 2, 0);
+  SetScriptBreakPointByNameFromJS("1", 2, 0);
 
   // Compile the script and get the function.
   v8::Script::Compile(script, &origin_1)->Run();
@@ -1545,7 +1665,7 @@
       v8::ScriptOrigin(v8::String::New("test"));
 
   // Set a script break point before the scripts are loaded.
-  int sbp = SetScriptBreakPointFromJS("test", 1, 0);
+  int sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
 
   // Compile the scripts with same script data and get the functions.
   v8::Script::Compile(script_f, &origin)->Run();
@@ -1571,7 +1691,7 @@
   CHECK_EQ(0, break_point_hit_count);
 
   // Set script break point with the scripts loaded.
-  sbp = SetScriptBreakPointFromJS("test", 1, 0);
+  sbp = SetScriptBreakPointByNameFromJS("test", 1, 0);
 
   // Call f and g and check that the script break point is active.
   break_point_hit_count = 0;
@@ -1607,8 +1727,8 @@
                           v8::Integer::New(7));
 
   // Set two script break points before the script is loaded.
-  int sbp1 = SetScriptBreakPointFromJS("test.html", 8, 0);
-  int sbp2 = SetScriptBreakPointFromJS("test.html", 9, 0);
+  int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 8, 0);
+  int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 9, 0);
 
   // Compile the script and get the function.
   v8::Script::Compile(script, &origin)->Run();
@@ -1629,7 +1749,7 @@
   CHECK_EQ(0, break_point_hit_count);
 
   // Set a script break point with the script loaded.
-  sbp1 = SetScriptBreakPointFromJS("test.html", 9, 0);
+  sbp1 = SetScriptBreakPointByNameFromJS("test.html", 9, 0);
 
   // Call f and check that the script break point is active.
   break_point_hit_count = 0;
@@ -1673,9 +1793,9 @@
     " a=5;                      // line 12");
 
   // Set a couple script break point before the script is loaded.
-  int sbp1 = SetScriptBreakPointFromJS("test.html", 0, -1);
-  int sbp2 = SetScriptBreakPointFromJS("test.html", 1, -1);
-  int sbp3 = SetScriptBreakPointFromJS("test.html", 5, -1);
+  int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 0, -1);
+  int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 1, -1);
+  int sbp3 = SetScriptBreakPointByNameFromJS("test.html", 5, -1);
 
   // Compile the script and get the function.
   break_point_hit_count = 0;
@@ -1700,7 +1820,7 @@
 
   // Clear the script break point on g and set one on h.
   ClearBreakPointFromJS(sbp3);
-  int sbp4 = SetScriptBreakPointFromJS("test.html", 6, -1);
+  int sbp4 = SetScriptBreakPointByNameFromJS("test.html", 6, -1);
 
   // Call g and check that the script break point in h is hit.
   g->Call(env->Global(), 0, NULL);
@@ -1712,7 +1832,7 @@
   // more.
   ClearBreakPointFromJS(sbp2);
   ClearBreakPointFromJS(sbp4);
-  int sbp5 = SetScriptBreakPointFromJS("test.html", 4, -1);
+  int sbp5 = SetScriptBreakPointByNameFromJS("test.html", 4, -1);
   break_point_hit_count = 0;
   f->Call(env->Global(), 0, NULL);
   g->Call(env->Global(), 0, NULL);
@@ -1725,7 +1845,7 @@
   CHECK_EQ(0, strlen(last_function_hit));
 
   // Set a break point in the code after the last function decleration.
-  int sbp6 = SetScriptBreakPointFromJS("test.html", 12, -1);
+  int sbp6 = SetScriptBreakPointByNameFromJS("test.html", 12, -1);
 
   // Reload the script which should hit three break points.
   break_point_hit_count = 0;
@@ -3695,4 +3815,3 @@
   // The host dispatch callback should be called.
   CHECK_EQ(1, host_dispatch_hit_count);
 }
-
diff --git a/test/cctest/test-list.cc b/test/cctest/test-list.cc
new file mode 100644
index 0000000..d10cdd7
--- /dev/null
+++ b/test/cctest/test-list.cc
@@ -0,0 +1,67 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <string.h>
+#include "v8.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+// Use a testing allocator that clears memory before deletion.
+class ZeroingAllocationPolicy {
+ public:
+  static void* New(size_t size) {
+    // Stash the size in the first word to use for Delete.
+    size_t true_size = size + sizeof(size_t);
+    size_t* result = reinterpret_cast<size_t*>(malloc(true_size));
+    if (result == NULL) return result;
+    *result = true_size;
+    return result + 1;
+  }
+
+  static void Delete(void* ptr) {
+    size_t* true_ptr = reinterpret_cast<size_t*>(ptr) - 1;
+    memset(true_ptr, 0, *true_ptr);
+    free(true_ptr);
+  }
+};
+
+// Check that we can add (a reference to) an element of the list
+// itself.
+TEST(ListAdd) {
+  // Add elements to the list to grow it to its capacity.
+  List<int, ZeroingAllocationPolicy> list(4);
+  list.Add(1);
+  list.Add(2);
+  list.Add(3);
+  list.Add(4);
+
+  // Add an existing element, the backing store should have to grow.
+  list.Add(list[0]);
+  CHECK(list[4] == 1);
+}
diff --git a/test/cctest/test-log-ia32.cc b/test/cctest/test-log-ia32.cc
index 9e2d2f4..588be71 100644
--- a/test/cctest/test-log-ia32.cc
+++ b/test/cctest/test-log-ia32.cc
@@ -43,7 +43,7 @@
 static void DoTrace(unsigned int fp) {
   trace_env.sample->fp = fp;
   // something that is less than fp
-  trace_env.sample->sp = trace_env.sample->fp - sizeof(unsigned int);
+  trace_env.sample->sp = trace_env.sample->fp - 100;
   trace_env.tracer->Trace(trace_env.sample);
 }
 
@@ -94,9 +94,9 @@
 #ifdef DEBUG
   // C stack trace works only in debug mode, in release mode EBP is
   // usually treated as a general-purpose register
+  CHECK_GT(sample.frames_count, 0);
   CheckRetAddrIsInCFunction(reinterpret_cast<unsigned int>(sample.stack[0]),
                             reinterpret_cast<unsigned int>(&CFunc));
-  CHECK_EQ(0, sample.stack[1]);
 #endif
 }
 
@@ -217,15 +217,18 @@
       "  JSFuncDoTrace();"
       "};\n"
       "JSTrace();");
+  CHECK_GT(sample.frames_count, 1);
+  CheckRetAddrIsInFunction(
+      reinterpret_cast<unsigned int>(sample.stack[0]),
+      reinterpret_cast<unsigned int>(call_trace_code->instruction_start()),
+      call_trace_code->instruction_size());
   Handle<JSFunction> js_trace(JSFunction::cast(*(v8::Utils::OpenHandle(
       *GetGlobalProperty("JSTrace")))));
   v8::internal::Code* js_trace_code = js_trace->code();
   CheckRetAddrIsInFunction(
-      reinterpret_cast<unsigned int>(sample.stack[0]),
+      reinterpret_cast<unsigned int>(sample.stack[1]),
       reinterpret_cast<unsigned int>(js_trace_code->instruction_start()),
       js_trace_code->instruction_size());
-  CHECK_EQ(0, sample.stack[1]);
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
-
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 5945fe7..ed2e9ab 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -214,6 +214,7 @@
   CHECK_PARSE_EQ("\\x34", "'\x34'");
   CHECK_PARSE_EQ("\\x60", "'\x60'");
   CHECK_PARSE_EQ("\\x3z", "'x3z'");
+  CHECK_PARSE_EQ("\\c", "'c'");
   CHECK_PARSE_EQ("\\u0034", "'\x34'");
   CHECK_PARSE_EQ("\\u003z", "'u003z'");
   CHECK_PARSE_EQ("foo[z]*", "(: 'foo' (# 0 - g [z]))");
@@ -363,8 +364,6 @@
   const char* kUnterminatedCharacterClass = "Unterminated character class";
   ExpectError("[", kUnterminatedCharacterClass);
   ExpectError("[a-", kUnterminatedCharacterClass);
-  const char* kEndControl = "\\c at end of pattern";
-  ExpectError("\\c", kEndControl);
   const char* kNothingToRepeat = "Nothing to repeat";
   ExpectError("*", kNothingToRepeat);
   ExpectError("?", kNothingToRepeat);
diff --git a/test/cctest/test-sockets.cc b/test/cctest/test-sockets.cc
index 187bbe0..9316a26 100644
--- a/test/cctest/test-sockets.cc
+++ b/test/cctest/test-sockets.cc
@@ -63,6 +63,19 @@
 }
 
 
+static bool SendAll(Socket* socket, const char* data, int len) {
+  int sent_len = 0;
+  while (sent_len < len) {
+    int status = socket->Send(data + sent_len, len - sent_len);
+    if (status <= 0) {
+      return false;
+    }
+    sent_len += status;
+  }
+  return true;
+}
+
+
 static void SendAndReceive(char *data, int len) {
   bool ok;
 
@@ -78,7 +91,7 @@
   CHECK(ok);
 
   // Send all the data.
-  ok = client->SendAll(data, len);
+  ok = SendAll(client, data, len);
   CHECK(ok);
 
   // Wait until data is received.
diff --git a/test/mjsunit/bugs/bug-269.js b/test/mjsunit/bugs/bug-269.js
new file mode 100644
index 0000000..49b24c0
--- /dev/null
+++ b/test/mjsunit/bugs/bug-269.js
@@ -0,0 +1,49 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    exec_state.prepareStep(Debug.StepAction.StepIn);
+  }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function g() {
+}
+ 
+function f() {
+  debugger;
+  g.apply(null, ['']);
+}
+
+f()
\ No newline at end of file
diff --git a/test/mjsunit/compare-constants.js b/test/mjsunit/compare-constants.js
new file mode 100644
index 0000000..e11ac0c
--- /dev/null
+++ b/test/mjsunit/compare-constants.js
@@ -0,0 +1,121 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test comparison operations that involve one or two constant smis.
+
+function test() {
+  var i = 5;
+  var j = 3;
+
+  assertTrue( j < i );
+  i = 5; j = 3;
+  assertTrue( j <= i );
+  i = 5; j = 3;
+  assertTrue( i > j );
+  i = 5; j = 3;
+  assertTrue( i >= j );
+  i = 5; j = 3;
+  assertTrue( i != j );
+  i = 5; j = 3;
+  assertTrue( i == i );
+  i = 5; j = 3;
+  assertFalse( i < j );
+  i = 5; j = 3;
+  assertFalse( i <= j );
+  i = 5; j = 3;
+  assertFalse( j > i );
+  i = 5; j = 3;
+  assertFalse(j >= i );
+  i = 5; j = 3;
+  assertFalse( j == i);
+  i = 5; j = 3;
+  assertFalse( i != i);
+
+  i = 10 * 10;
+  while ( i < 107 ) {
+    ++i;
+  }
+  j = 21;
+
+  assertTrue( j < i );
+  j = 21;
+  assertTrue( j <= i );
+  j = 21;
+  assertTrue( i > j );
+  j = 21;
+  assertTrue( i >= j );
+  j = 21;
+  assertTrue( i != j );
+  j = 21;
+  assertTrue( i == i );
+  j = 21;
+  assertFalse( i < j );
+  j = 21;
+  assertFalse( i <= j );
+  j = 21;
+  assertFalse( j > i );
+  j = 21;
+  assertFalse(j >= i );
+  j = 21;
+  assertFalse( j == i);
+  j = 21;
+  assertFalse( i != i);
+  j = 21;
+  assertTrue( j == j );
+  j = 21;
+  assertFalse( j != j );
+
+  assertTrue( 100 > 99 );
+  assertTrue( 101 >= 90 );
+  assertTrue( 11111 > -234 );
+  assertTrue( -888 <= -20 );
+
+  while ( 234 > 456 ) {
+    i = i + 1;
+  }
+
+  switch(3) {
+    case 5:
+      assertUnreachable();
+      break;
+    case 3:
+      j = 13;
+    default:
+      i = 2;
+    case 7:
+      j = 17;
+      break;
+    case 9:
+      j = 19;
+      assertUnreachable();
+      break;
+  }
+  assertEquals(17, j, "switch with constant value");
+}
+
+test();
+
diff --git a/test/mjsunit/debug-evaluate.js b/test/mjsunit/debug-evaluate.js
index 75d2334..5c5734f 100644
--- a/test/mjsunit/debug-evaluate.js
+++ b/test/mjsunit/debug-evaluate.js
@@ -64,33 +64,33 @@
 
 function listener(event, exec_state, event_data, data) {
   try {
-  if (event == Debug.DebugEvent.Break) {
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    if (event == Debug.DebugEvent.Break) {
+      // Get the debug command processor.
+      var dcp = exec_state.debugCommandProcessor();
 
-    // Test some illegal evaluate requests.
-    testRequest(dcp, void 0, false);
-    testRequest(dcp, '{"expression":"1","global"=true}', false);
-    testRequest(dcp, '{"expression":"a","frame":4}', false);
+      // Test some illegal evaluate requests.
+      testRequest(dcp, void 0, false);
+      testRequest(dcp, '{"expression":"1","global"=true}', false);
+      testRequest(dcp, '{"expression":"a","frame":4}', false);
 
-    // Test some legal evaluate requests.
-    testRequest(dcp, '{"expression":"1+2"}', true, 3);
-    testRequest(dcp, '{"expression":"a+2"}', true, 5);
-    testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
+      // Test some legal evaluate requests.
+      testRequest(dcp, '{"expression":"1+2"}', true, 3);
+      testRequest(dcp, '{"expression":"a+2"}', true, 5);
+      testRequest(dcp, '{"expression":"({\\"a\\":1,\\"b\\":2}).b+2"}', true, 4);
 
-    // Test evaluation of a in the stack frames and the global context.
-    testRequest(dcp, '{"expression":"a"}', true, 3);
-    testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
-    testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
-    testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
-    testRequest(dcp, '{"expression":"a","global":true}', true, 1);
-    testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
+      // Test evaluation of a in the stack frames and the global context.
+      testRequest(dcp, '{"expression":"a"}', true, 3);
+      testRequest(dcp, '{"expression":"a","frame":0}', true, 3);
+      testRequest(dcp, '{"expression":"a","frame":1}', true, 2);
+      testRequest(dcp, '{"expression":"a","frame":2}', true, 1);
+      testRequest(dcp, '{"expression":"a","global":true}', true, 1);
+      testRequest(dcp, '{"expression":"this.a","global":true}', true, 1);
 
-    // Indicate that all was processed.
-    listenerComplete = true;
-  }
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
   } catch (e) {
-    exception = e
+   exception = e
   };
 };
 
diff --git a/test/mjsunit/debug-script-breakpoints.js b/test/mjsunit/debug-script-breakpoints.js
index 28c8018..ec9656c 100644
--- a/test/mjsunit/debug-script-breakpoints.js
+++ b/test/mjsunit/debug-script-breakpoints.js
@@ -29,8 +29,8 @@
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 
-// Set and remove a script break point.
-var sbp = Debug.setScriptBreakPoint("1", 2, 3);
+// Set and remove a script break point for a named script.
+var sbp = Debug.setScriptBreakPointByName("1", 2, 3);
 assertEquals(1, Debug.scriptBreakPoints().length);
 assertEquals("1", Debug.scriptBreakPoints()[0].script_name());
 assertEquals(2, Debug.scriptBreakPoints()[0].line());
@@ -38,10 +38,10 @@
 Debug.clearBreakPoint(sbp);
 assertEquals(0, Debug.scriptBreakPoints().length);
 
-// Set three script break points.
-var sbp1 = Debug.setScriptBreakPoint("1", 2, 3);
-var sbp2 = Debug.setScriptBreakPoint("2", 3, 4);
-var sbp3 = Debug.setScriptBreakPoint("3", 4, 5);
+// Set three script break points for named scripts.
+var sbp1 = Debug.setScriptBreakPointByName("1", 2, 3);
+var sbp2 = Debug.setScriptBreakPointByName("2", 3, 4);
+var sbp3 = Debug.setScriptBreakPointByName("3", 4, 5);
 
 // Check the content of the script break points.
 assertEquals(3, Debug.scriptBreakPoints().length);
@@ -57,7 +57,48 @@
     assertEquals(4, x.line());
     assertEquals(5, x.column());
   } else {
-    assertUnreachable("unecpected script_data " + x.script_data());
+    assertUnreachable("unexpected script_name " + x.script_name());
+  }
+}
+
+// Remove script break points (in another order than they were added).
+assertEquals(3, Debug.scriptBreakPoints().length);
+Debug.clearBreakPoint(sbp1);
+assertEquals(2, Debug.scriptBreakPoints().length);
+Debug.clearBreakPoint(sbp3);
+assertEquals(1, Debug.scriptBreakPoints().length);
+Debug.clearBreakPoint(sbp2);
+assertEquals(0, Debug.scriptBreakPoints().length);
+
+// Set and remove a script break point for a script id.
+var sbp = Debug.setScriptBreakPointById(1, 2, 3);
+assertEquals(1, Debug.scriptBreakPoints().length);
+assertEquals(1, Debug.scriptBreakPoints()[0].script_id());
+assertEquals(2, Debug.scriptBreakPoints()[0].line());
+assertEquals(3, Debug.scriptBreakPoints()[0].column());
+Debug.clearBreakPoint(sbp);
+assertEquals(0, Debug.scriptBreakPoints().length);
+
+// Set three script break points for script ids.
+var sbp1 = Debug.setScriptBreakPointById(1, 2, 3);
+var sbp2 = Debug.setScriptBreakPointById(2, 3, 4);
+var sbp3 = Debug.setScriptBreakPointById(3, 4, 5);
+
+// Check the content of the script break points.
+assertEquals(3, Debug.scriptBreakPoints().length);
+for (var i = 0; i < Debug.scriptBreakPoints().length; i++) {
+  var x = Debug.scriptBreakPoints()[i];
+  if (1 == x.script_id()) {
+    assertEquals(2, x.line());
+    assertEquals(3, x.column());
+  } else if (2 == x.script_id()) {
+    assertEquals(3, x.line());
+    assertEquals(4, x.column());
+  } else if (3 == x.script_id()) {
+    assertEquals(4, x.line());
+    assertEquals(5, x.column());
+  } else {
+    assertUnreachable("unexpected script_id " + x.script_id());
   }
 }
 
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index d7a8a24..effa145 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -33,28 +33,26 @@
 RegExp();
 
 // Count script types.
-var native_count = 0;
+var named_native_count = 0;
 var extension_count = 0;
 var normal_count = 0;
 var scripts = Debug.scripts();
 for (i = 0; i < scripts.length; i++) {
   if (scripts[i].type == Debug.ScriptType.Native) {
-    native_count++;
+    if (scripts[i].name) {
+      named_native_count++;
+    }
   } else if (scripts[i].type == Debug.ScriptType.Extension) {
     extension_count++;
   } else if (scripts[i].type == Debug.ScriptType.Normal) {
-    if (!scripts[i].name) print("X" + scripts[i].source + "X"); // empty script
-    else {
-      print(scripts[i].name);
-      normal_count++;
-      }
+    normal_count++;
   } else {
     assertUnreachable('Unexpected type ' + scripts[i].type);
   }
 }
 
 // This has to be updated if the number of native scripts change.
-assertEquals(12, native_count);
+assertEquals(12, named_native_count);
 // If no snapshot is used, only the 'gc' extension is loaded.
 // If snapshot is used, all extensions are cached in the snapshot.
 assertTrue(extension_count == 1 || extension_count == 5);
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index 2a8cd6c..904ec18 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -30,8 +30,13 @@
 Debug = debug.Debug
 
 // Simple function which stores the last debug event.
-listenerComplete = false;
-exception = false;
+var listenerComplete = false;
+var exception = false;
+var f_script_id = 0;
+var g_script_id = 0;
+var h_script_id = 0;
+var f_line = 0;
+var g_line = 0;
 
 var base_request = '"seq":0,"type":"request","command":"setbreakpoint"'
 
@@ -44,13 +49,17 @@
   }
 }
 
-function testArguments(dcp, arguments, success, type) {
+function testArguments(dcp, arguments, success, is_script) {
   var request = '{' + base_request + ',"arguments":' + arguments + '}'
   var json_response = dcp.processDebugJSONRequest(request);
   var response = safeEval(json_response);
   if (success) {
     assertTrue(response.success, json_response);
-    assertEquals(type ? type : 'script', response.body.type, json_response);
+    if (is_script) {
+      assertEquals('scriptName', response.body.type, json_response);
+    } else {
+      assertEquals('scriptId', response.body.type, json_response);
+    }
   } else {
     assertFalse(response.success, json_response);
   }
@@ -79,18 +88,23 @@
     testArguments(dcp, '{"type":"function","target":"f","ignoreCount":-1}', false);
 
     // Test some legal setbreakpoint requests.
-    testArguments(dcp, '{"type":"function","target":"f"}', true);
-    testArguments(dcp, '{"type":"function","target":"h"}', true, 'function');
-    testArguments(dcp, '{"type":"function","target":"f","line":1}', true);
-    testArguments(dcp, '{"type":"function","target":"f","position":1}', true);
-    testArguments(dcp, '{"type":"function","target":"f","condition":"i == 1"}', true);
-    testArguments(dcp, '{"type":"function","target":"f","enabled":true}', true);
-    testArguments(dcp, '{"type":"function","target":"f","enabled":false}', true);
-    testArguments(dcp, '{"type":"function","target":"f","ignoreCount":7}', true);
-    testArguments(dcp, '{"type":"script","target":"test"}', true);
-    testArguments(dcp, '{"type":"script","target":"test"}', true);
-    testArguments(dcp, '{"type":"script","target":"test","line":1}', true);
-    testArguments(dcp, '{"type":"script","target":"test","column":1}', true);
+    testArguments(dcp, '{"type":"function","target":"f"}', true, false);
+    testArguments(dcp, '{"type":"function","target":"h"}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","line":1}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","position":1}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","condition":"i == 1"}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","enabled":true}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","enabled":false}', true, false);
+    testArguments(dcp, '{"type":"function","target":"f","ignoreCount":7}', true, false);
+
+    testArguments(dcp, '{"type":"script","target":"test"}', true, true);
+    testArguments(dcp, '{"type":"script","target":"test"}', true, true);
+    testArguments(dcp, '{"type":"script","target":"test","line":1}', true, true);
+    testArguments(dcp, '{"type":"script","target":"test","column":1}', true, true);
+
+    testArguments(dcp, '{"type":"scriptId","target":' + f_script_id + ',"line":' + f_line + '}', true, false);
+    testArguments(dcp, '{"type":"scriptId","target":' + g_script_id + ',"line":' + g_line + '}', true, false);
+    testArguments(dcp, '{"type":"scriptId","target":' + h_script_id + ',"line":' + h_line + '}', true, false);
 
     // Indicate that all was processed.
     listenerComplete = true;
@@ -113,10 +127,27 @@
 
 eval('function h(){}');
 
+// Check the script ids for the test functions.
+f_script_id = Debug.findScript(f).id;
+g_script_id = Debug.findScript(g).id;
+h_script_id = Debug.findScript(h).id;
+assertTrue(f_script_id > 0, "invalid script id for f");
+assertTrue(g_script_id > 0, "invalid script id for g");
+assertTrue(h_script_id > 0, "invalid script id for h");
+assertEquals(f_script_id, g_script_id);
+
+// Get the source line for the test functions.
+f_line = Debug.findFunctionSourceLocation(f).line;
+g_line = Debug.findFunctionSourceLocation(g).line;
+h_line = Debug.findFunctionSourceLocation(h).line;
+assertTrue(f_line > 0, "invalid line for f");
+assertTrue(g_line > 0, "invalid line for g");
+assertTrue(f_line < g_line);
+assertEquals(h_line, 0, "invalid line for h");
+
 // Set a break point and call to invoke the debug event listener.
 Debug.setBreakPoint(g, 0, 0);
 g();
 
 // Make sure that the debug event listener vas invoked.
-assertTrue(listenerComplete, "listener did not run to completion");
-assertFalse(exception, "exception in listener")
+assertTrue(listenerComplete, "listener did not run to completion: " + exception);
diff --git a/test/mjsunit/debug-sourceinfo.js b/test/mjsunit/debug-sourceinfo.js
index 2bad07a..36e9f03 100644
--- a/test/mjsunit/debug-sourceinfo.js
+++ b/test/mjsunit/debug-sourceinfo.js
@@ -175,18 +175,18 @@
 assertEquals(0, script.locationFromLine(1, 12, start_b).column);

 

 // Test the Debug.findSourcePosition which wraps SourceManager.

-assertEquals(0 + start_a, Debug.findFunctionSourcePosition(a, 0, 0));

-assertEquals(0 + start_b, Debug.findFunctionSourcePosition(b, 0, 0));

-assertEquals(6 + start_b, Debug.findFunctionSourcePosition(b, 1, 0));

-assertEquals(8 + start_b, Debug.findFunctionSourcePosition(b, 1, 2));

-assertEquals(18 + start_b, Debug.findFunctionSourcePosition(b, 2, 0));

-assertEquals(0 + start_c, Debug.findFunctionSourcePosition(c, 0, 0));

-assertEquals(7 + start_c, Debug.findFunctionSourcePosition(c, 1, 0));

-assertEquals(21 + start_c, Debug.findFunctionSourcePosition(c, 2, 0));

-assertEquals(38 + start_c, Debug.findFunctionSourcePosition(c, 3, 0));

-assertEquals(52 + start_c, Debug.findFunctionSourcePosition(c, 4, 0));

-assertEquals(69 + start_c, Debug.findFunctionSourcePosition(c, 5, 0));

-assertEquals(76 + start_c, Debug.findFunctionSourcePosition(c, 6, 0));

+assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);

+assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);

+assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);

+assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);

+assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);

+assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);

+assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);

+assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);

+assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);

+assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);

+assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);

+assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);

 

 // Test source line and restriction. All the following tests start from line 1

 // column 2 in function b, which is the call to c.

diff --git a/test/mjsunit/debug-step.js b/test/mjsunit/debug-step.js
index 7a75bb1..4534218 100644
--- a/test/mjsunit/debug-step.js
+++ b/test/mjsunit/debug-step.js
@@ -36,8 +36,7 @@
 var bp1, bp2;
 
 function listener(event, exec_state, event_data, data) {
-  if (event == Debug.DebugEvent.Break)
-  {
+  if (event == Debug.DebugEvent.Break) {
     if (state == 0) {
       exec_state.prepareStep(Debug.StepAction.StepIn, 1000);
       state = 1;
@@ -68,7 +67,6 @@
 state = 0;
 result = -1;
 f();
-print(state);
 assertEquals(499, result);
 
 // Check that performing 1000 steps with a break point on the statement in the
diff --git a/test/mjsunit/error-constructors.js b/test/mjsunit/error-constructors.js
new file mode 100644
index 0000000..ca2aa06
--- /dev/null
+++ b/test/mjsunit/error-constructors.js
@@ -0,0 +1,32 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var e = new Error();
+assertFalse(e.hasOwnProperty('message'));
+Error.prototype.toString = Object.prototype.toString;
+assertEquals("[object Error]", Error.prototype.toString());
+assertEquals(Object.prototype, Error.prototype.__proto__);
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index a2c3217..c9d192f 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -31,8 +31,8 @@
   var result = [ ];
   result.push(17);
   result.push(-31);
-  result.push(Number.MAX_VALUE);
-  result.push(new Array(5003));
+  result.push(new Array(100));
+  result.push(new Array(100003));
   result.push(Number.MIN_VALUE);
   result.push("whoops");
   result.push("x");
@@ -121,7 +121,8 @@
   "PushContext": true,
   "LazyCompile": true,
   "CreateObjectLiteralBoilerplate": true,
-  "CloneObjectLiteralBoilerplate": true,
+  "CloneLiteralBoilerplate": true,
+  "CreateArrayLiteralBoilerplate": true,
   "IS_VAR": true,
   "ResolvePossiblyDirectEval": true,
   "Log": true
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 090208e..375978e 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -34,12 +34,18 @@
 # too long to run in debug mode on ARM.
 fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
 
+# Bug reliably triggers a debug assertion and crashes in release mode.
+bugs/bug-269: CRASH, FAIL if $mode == debug
+
 [ $arch == arm ]
 
 # Slow tests which times out in debug mode.
 try: PASS, SKIP if $mode == debug
 debug-scripts-request: PASS, SKIP if $mode == debug
 
+# Flaky test that can hit compilation-time stack overflow in debug mode.
+unicode-test: PASS, (PASS || FAIL) if $mode == debug
+
 # Bug number 1020483: Debug tests fail on ARM.
 debug-constructor: CRASH, FAIL
 debug-continue: SKIP
@@ -56,6 +62,7 @@
 debug-step: SKIP
 debug-breakpoints: PASS || FAIL
 debug-handle: CRASH, FAIL if $mode == debug
+bugs/bug-269: SKIP
 
 # Bug number 130 http://code.google.com/p/v8/issues/detail?id=130
 # Fails on real ARM hardware but not on the simulator.
diff --git a/test/mjsunit/multiple-return.js b/test/mjsunit/multiple-return.js
new file mode 100644
index 0000000..610a367
--- /dev/null
+++ b/test/mjsunit/multiple-return.js
@@ -0,0 +1,62 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function F() {
+  for (var x in [1,2,3]) {
+    return 42;
+  }
+  return 87;
+}
+
+
+function G() {
+  for (var x in [1,2,3]) {
+    try {
+      return 42;
+    } finally {
+      // Do nothing.
+    }
+  }
+  return 87;
+}
+
+
+function H() {
+  for (var x in [1,2,3]) {
+    try {
+      return 42;
+    } catch (e) {
+      // Do nothing.
+    }
+  }
+  return 87;
+}
+
+
+assertEquals(42, F());
+assertEquals(42, G());
+assertEquals(42, H());
diff --git a/test/mjsunit/object-literal.js b/test/mjsunit/object-literal.js
new file mode 100644
index 0000000..e2da84b
--- /dev/null
+++ b/test/mjsunit/object-literal.js
@@ -0,0 +1,63 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var obj = {
+    a: 7,
+    b: { x: 12, y: 24 },
+    c: 'Zebra'
+}
+
+assertEquals(7, obj.a);
+assertEquals(12, obj.b.x);
+assertEquals(24, obj.b.y);
+assertEquals('Zebra', obj.c);
+
+var z = 24;
+
+var obj2 = {
+    a: 7,
+    b: { x: 12, y: z },
+    c: 'Zebra'
+}
+
+assertEquals(7, obj2.a);
+assertEquals(12, obj2.b.x);
+assertEquals(24, obj2.b.y);
+assertEquals('Zebra', obj2.c);
+
+var arr = [];
+for (var i = 0; i < 2; i++) {
+  arr[i] = {
+      a: 7,
+      b: { x: 12, y: 24 },
+      c: 'Zebra'
+  }
+}
+
+arr[0].a = 2;
+assertEquals(2, arr[0].a);
+assertEquals(7, arr[1].a);
diff --git a/test/mjsunit/regexp-static.js b/test/mjsunit/regexp-static.js
index 5db9fe2..9e73f3d 100644
--- a/test/mjsunit/regexp-static.js
+++ b/test/mjsunit/regexp-static.js
@@ -132,3 +132,36 @@
 re = /(.)/g;
 function f() { return RegExp.$1; };
 assertEquals('abcd', 'abcd'.replace(re, f));
+
+// lastParen where the last parenthesis didn't match.
+assertEquals("foo,", /foo(?:a(x))?/.exec("foobx"), "lastParen setup");
+assertEquals("", RegExp.lastParen, "lastParen");
+
+// The same test for $1 to $9.
+for (var i = 1; i <= 9; i++) {
+  var haystack = "foo";
+  var re_text = "^foo";
+  for (var j = 0; j < i - 1; j++) {
+    haystack += "x";
+    re_text += "(x)";
+  }
+  re_text += "(?:a(x))?";
+  haystack += "bx";
+  var re = new RegExp(re_text);
+  assertTrue(re.test(haystack), "$" + i + " setup");
+  for (var j = 1; j < i - 1; j++) {
+    assertEquals("x", RegExp['$' + j], "$" + j + " in $" + i + " setup");
+  }
+  assertEquals("", RegExp['$' + (i)], "$" + i);
+}
+
+RegExp.multiline = "foo";
+assertTrue(typeof RegExp.multiline == typeof Boolean(), "RegExp.multiline coerces values to booleans");
+RegExp.input = Number();
+assertTrue(typeof RegExp.input == typeof String(), "RegExp.input coerces values to strings");
+
+// Ensure that we save the correct string as the last subject when
+// we do a match on a sliced string (the top one not the underlying).
+var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
+assertTrue(/^([a-z]+): (.*)/.test(foo.substring(foo.indexOf("regexp:"))), "regexp: setup");
+assertEquals("regexp", RegExp.$1, "RegExp.$1");
diff --git a/test/mjsunit/regexp-string-methods.js b/test/mjsunit/regexp-string-methods.js
new file mode 100644
index 0000000..ef3bf6e
--- /dev/null
+++ b/test/mjsunit/regexp-string-methods.js
@@ -0,0 +1,51 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regexp shouldn't use String.prototype.slice()
+var s = new String("foo");
+assertEquals("f", s.slice(0,1));
+String.prototype.slice = function() { return "x"; }
+assertEquals("x", s.slice(0,1));
+assertEquals("g", /g/.exec("gg"));
+
+// Regexp shouldn't use String.prototype.charAt()
+var f1 = new RegExp("f", "i");
+assertEquals("F", f1.exec("F"));
+assertEquals("f", "foo".charAt(0));
+String.prototype.charAt = function(idx) { return 'g'; };
+assertEquals("g", "foo".charAt(0));
+var f2 = new RegExp("[g]", "i");
+assertEquals("G", f2.exec("G"));
+assertTrue(f2.ignoreCase);
+
+// On the other hand test is defined in a semi-coherent way as a call to exec.
+// 15.10.6.3
+// We match other browsers in using the original value of RegExp.prototype.exec.
+// I.e., RegExp.prototype.test shouldn't use the current value of
+// RegExp.prototype.exec.
+RegExp.prototype.exec = function(string) { return 'x'; }
+assertFalse(/f/.test('x'));
diff --git a/test/mjsunit/regress/regress-1493017.js b/test/mjsunit/regress/regress-1493017.js
new file mode 100644
index 0000000..99a1dad
--- /dev/null
+++ b/test/mjsunit/regress/regress-1493017.js
@@ -0,0 +1,52 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test collection of abandoned maps.  Tests that deleted map
+// transitions do not show up as properties in for in.
+
+// Flags: --expose-gc --collect-maps
+
+function C() {}
+
+
+// Create an instance of C.  Add a property to the instance and then
+// remove all references to instances of C.
+var o = new C();
+o.x = 42;
+o = null;
+
+// Force a global GC. This will collect the maps starting from C and
+// delete map transitions.
+gc();
+
+// Create a new instance of C.
+o = new C();
+
+// Test that the deleted map transitions do not show up in for in.
+for (var p in o) {
+  assertTrue(false);
+}
diff --git a/test/mjsunit/regress/regress-244.js b/test/mjsunit/regress/regress-244.js
new file mode 100644
index 0000000..ffddcf8
--- /dev/null
+++ b/test/mjsunit/regress/regress-244.js
@@ -0,0 +1,67 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var kLegalPairs = [
+  [0x00, '%00'],
+  [0x01, '%01'],
+  [0x7f, '%7F'],
+  [0x80, '%C2%80'],
+  [0x81, '%C2%81'],
+  [0x7ff, '%DF%BF'],
+  [0x800, '%E0%A0%80'],
+  [0x801, '%E0%A0%81'],
+  [0xd7ff, '%ED%9F%BF'],
+  [0xffff, '%EF%BF%BF']
+];
+
+var kIllegalEncoded = [
+  '%80', '%BF', '%80%BF', '%80%BF%80', '%C0%22', '%DF',
+  '%EF%BF', '%F7BFBF', '%FE', '%FF', '%FE%FE%FF%FF',
+  '%C0%AF', '%E0%9F%BF', '%F0%8F%BF%BF', '%C0%80',
+  '%E0%80%80'
+];
+
+function run() {
+  for (var i = 0; i < kLegalPairs.length; i++) {
+    var decoded = String.fromCharCode(kLegalPairs[i][0]);
+    var encoded = kLegalPairs[i][1];
+    assertEquals(decodeURI(encoded), decoded);
+    assertEquals(encodeURI(decoded), encoded);
+  }
+  for (var i = 0; i < kIllegalEncoded.length; i++) {
+    var value = kIllegalEncoded[i];
+    var threw = false;
+    try {
+      decodeURI(value);
+      fail(value);
+    } catch (e) {
+      assertInstanceof(e, URIError);
+    }
+  }
+}
+
+run();
diff --git a/test/mjsunit/regress/regress-254.js b/test/mjsunit/regress/regress-254.js
new file mode 100755
index 0000000..ec4b40a
--- /dev/null
+++ b/test/mjsunit/regress/regress-254.js
@@ -0,0 +1,58 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=254
+
+// RegExp with global flag: exec and test updates lastIndex.
+var re = /x/g;
+
+assertEquals(0, re.lastIndex, "Global, initial lastIndex");
+
+assertTrue(re.test("x"), "Global, test 1");
+assertEquals(1, re.lastIndex, "Global, lastIndex after test 1");
+assertFalse(re.test("x"), "Global, test 2");
+assertEquals(0, re.lastIndex, "Global, lastIndex after test 2");
+
+assertEquals(["x"], re.exec("x"), "Global, exec 1");
+assertEquals(1, re.lastIndex, "Global, lastIndex after exec 1");
+assertEquals(null, re.exec("x"), "Global, exec 2");
+assertEquals(0, re.lastIndex, "Global, lastIndex after exec 2");
+
+// RegExp without global flag: exec and test leaves lastIndex at zero.
+var re2 = /x/;
+
+assertEquals(0, re2.lastIndex, "Non-global, initial lastIndex");
+
+assertTrue(re2.test("x"), "Non-global, test 1");
+assertEquals(0, re2.lastIndex, "Non-global, lastIndex after test 1");
+assertTrue(re2.test("x"), "Non-global, test 2");
+assertEquals(0, re2.lastIndex, "Non-global, lastIndex after test 2");
+
+assertEquals(["x"], re2.exec("x"), "Non-global, exec 1");
+assertEquals(0, re2.lastIndex, "Non-global, lastIndex after exec 1");
+assertEquals(["x"], re2.exec("x"), "Non-global, exec 2");
+assertEquals(0, re2.lastIndex, "Non-global, lastIndex after exec 2");
diff --git a/test/mjsunit/regress/regress-259.js b/test/mjsunit/regress/regress-259.js
new file mode 100644
index 0000000..f0476ff
--- /dev/null
+++ b/test/mjsunit/regress/regress-259.js
@@ -0,0 +1,33 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we do not crash when compiling a try/finally with an
+// infinite loop (with no normal exits) in the try block.
+
+// See http://code.google.com/p/v8/issues/detail?id=259
+
+assertThrows("try { while (true) { throw 0; }} finally {}");
diff --git a/test/mjsunit/regress/regress-260.js b/test/mjsunit/regress/regress-260.js
new file mode 100644
index 0000000..65242bc
--- /dev/null
+++ b/test/mjsunit/regress/regress-260.js
@@ -0,0 +1,33 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// We should not compile the bodies of function literals in loop
+// conditions twice, even in cases where the loop condition is
+// compiled twice.
+
+function test() { eval("while(!function () { var x; });"); }
+test();
diff --git a/test/mjsunit/regress/regress-263.js b/test/mjsunit/regress/regress-263.js
new file mode 100644
index 0000000..123bde6
--- /dev/null
+++ b/test/mjsunit/regress/regress-263.js
@@ -0,0 +1,38 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Exits via return, break, or continue from within try/finally or
+// for/in should not crash or trigger a debug assert.
+
+// See http://code.google.com/p/v8/issues/detail?id=263
+
+function test0() { with({}) for(var x in {}) return; }
+test0();
+
+
+function test1() { with({}) try { } finally { with({}) return; } }
+test1();
diff --git a/test/mjsunit/regress/regress-265.js b/test/mjsunit/regress/regress-265.js
new file mode 100644
index 0000000..21ac1a6
--- /dev/null
+++ b/test/mjsunit/regress/regress-265.js
@@ -0,0 +1,64 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// When returning or breaking out of a deeply nested try/finally, we
+// should not crash.
+
+// See http://code.google.com/p/v8/issues/detail?id=265
+
+function test0() {
+  try {
+    try {
+      return 0;
+    } finally {
+      try {
+        return 0;
+      } finally {
+      }
+    }
+  } finally {
+  }
+}
+
+test0();
+
+function test1() {
+L0:
+  try {
+    try {
+      break L0;
+    } finally {
+      try {
+        break L0;
+      } finally {
+      }
+    }
+  } finally {
+  }
+}
+
+test1();
diff --git a/test/mjsunit/regress/regress-267.js b/test/mjsunit/regress/regress-267.js
new file mode 100644
index 0000000..bb61606
--- /dev/null
+++ b/test/mjsunit/regress/regress-267.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See http://code.google.com/p/v8/issues/detail?id=267
+
+var global = (function(){ return this; })();
+function taint(fn){var v = fn(); eval("taint"); return v; }
+function getThis(){ return this; }
+var obj = taint(getThis);
+
+assertEquals(global, obj, "Should be the global object.");
diff --git a/test/mjsunit/short-circuit-boolean.js b/test/mjsunit/short-circuit-boolean.js
new file mode 100644
index 0000000..df40c22
--- /dev/null
+++ b/test/mjsunit/short-circuit-boolean.js
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test some code paths through the compiler for short-circuited
+// boolean expressions.
+
+function andTest0() {
+  var a = 0;
+  // Left subexpression is known false at compile time.
+  return a != 0 && "failure";
+}
+
+assertFalse(andTest0());
+
+
+function orTest0() {
+  var a = 0;
+  // Left subexpression is known true at compile time.
+  return a == 0 || "failure";
+}
+
+assertTrue(orTest0());
diff --git a/test/mjsunit/sparse-array-reverse.js b/test/mjsunit/sparse-array-reverse.js
index 9b9f323..45a6da4 100644
--- a/test/mjsunit/sparse-array-reverse.js
+++ b/test/mjsunit/sparse-array-reverse.js
@@ -74,7 +74,15 @@
 
     var to_delete = [];
 
-    var a = new Array(size);
+    var a;
+    // Make sure we test both array-backed and hash-table backed
+    // arrays.
+    if (size < 1000) {
+      a = new Array(size);
+    } else {
+      a = new Array();
+      a.length = size;
+    }
 
     var expected = '';
     var expected_reversed = '';
diff --git a/test/mjsunit/switch.js b/test/mjsunit/switch.js
index e2b14d1..4044490 100644
--- a/test/mjsunit/switch.js
+++ b/test/mjsunit/switch.js
@@ -222,8 +222,8 @@
 assertEquals(190, f6(20), "largeSwitch.20");
 assertEquals(2016, f6(64), "largeSwitch.64");
 assertEquals(4032, f6(128), "largeSwitch.128");
-assertEquals(4222, f6(148), "largeSwitch.148"); 
- 
+assertEquals(4222, f6(148), "largeSwitch.148");
+
 
 function f7(value) {
   switch (value) {
@@ -252,7 +252,7 @@
   case 11:
   case 12:
   case 13:
-  case 14: 
+  case 14:
   case 15:  // Dummy fillers
   }
   return "default";
@@ -270,7 +270,7 @@
 
 function makeVeryLong(length) {
   var res = "function() {\n" +
-            "  var res = 0;\n" + 
+            "  var res = 0;\n" +
             "  for (var i = 0; i <= " + length + "; i++) {\n" +
             "    switch(i) {\n";
   for (var i = 0; i < length; i++) {
@@ -286,4 +286,4 @@
 var verylong_size = 1000;
 var verylong = makeVeryLong(verylong_size);
 
-assertEquals(verylong_size * 2 + 1, verylong());
\ No newline at end of file
+assertEquals(verylong_size * 2 + 1, verylong());
diff --git a/test/mjsunit/this.js b/test/mjsunit/this.js
index 0019eb2..890dea4 100644
--- a/test/mjsunit/this.js
+++ b/test/mjsunit/this.js
@@ -35,7 +35,7 @@
 assertTrue(this === f());
 
 var x = {}, y = {};
-x.f = y.f = f; 
+x.f = y.f = f;
 assertFalse(x === f());
 assertFalse(y === f());
 assertTrue(x === x.f());
diff --git a/test/mjsunit/try.js b/test/mjsunit/try.js
index a3f4433..0bd78b4 100644
--- a/test/mjsunit/try.js
+++ b/test/mjsunit/try.js
@@ -65,7 +65,7 @@
 assertEquals(4, guard(function() { try { throw 3; } finally { throw 4; } }));
 
 (function () {
-  var iter = 10000000;
+  var iter = 1000000;
   for (var i = 1; i <= iter; i++) {
     try {
       if (i == iter) gc();
diff --git a/tools/js2c.py b/tools/js2c.py
index 0ae1ad9..52fe35c 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -104,7 +104,7 @@
 
 def ExpandMacros(lines, macros):
   for name, macro in macros.items():
-    start = lines.find(name, 0)
+    start = lines.find(name + '(', 0)
     while start != -1:
       # Scan over the arguments
       assert lines[start + len(name)] == '('
@@ -132,7 +132,7 @@
       result = macro.expand(mapping)
       # Replace the occurrence of the macro with the expansion
       lines = lines[:start] + result + lines[end:]
-      start = lines.find(name, end)
+      start = lines.find(name + '(', end)
   return lines
 
 class TextMacro:
diff --git a/tools/run-valgrind.py b/tools/run-valgrind.py
new file mode 100755
index 0000000..ccb9309
--- /dev/null
+++ b/tools/run-valgrind.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+#
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Simple wrapper for running valgrind and checking the output on
+# stderr for memory leaks.
+
+import subprocess
+import sys
+import re
+
+VALGRIND_ARGUMENTS = [
+  'valgrind',
+  '--error-exitcode=1',
+  '--leak-check=full',
+  '--smc-check=all'
+]
+
+# Compute the command line.
+command = VALGRIND_ARGUMENTS + sys.argv[1:]
+
+# Run valgrind.
+process = subprocess.Popen(command, stderr=subprocess.PIPE)
+code = process.wait();
+errors = process.stderr.readlines();
+
+# If valgrind produced an error, we report that to the user.
+if code != 0:
+  sys.stderr.writelines(errors)
+  sys.exit(code)
+
+# Look through the leak details and make sure that we don't
+# have any definitely, indirectly, and possibly lost bytes.
+LEAK_RE = r"(?:definitely|indirectly|possibly) lost: \d+ bytes in \d+ blocks."
+LEAK_LINE_MATCHER = re.compile(LEAK_RE)
+LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks.")
+leaks = []
+for line in errors:
+  if LEAK_LINE_MATCHER.search(line):
+    leaks.append(line)
+    if not LEAK_OKAY_MATCHER.search(line):
+      sys.stderr.writelines(errors)
+      sys.exit(1)
+
+# Make sure we found between 2 and 3 leak lines.
+if len(leaks) < 2 or len(leaks) > 3:
+  sys.stderr.writelines(errors)
+  sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
+  sys.exit(1)
+
+# No leaks found.
+sys.exit(0)
diff --git a/tools/test.py b/tools/test.py
index c1a8927..0ed5e9b 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1078,6 +1078,8 @@
   result.add_option("--simulator", help="Run tests with architecture simulator",
       default='none')
   result.add_option("--special-command", default=None)
+  result.add_option("--valgrind", help="Run tests through valgrind",
+      default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
       default=False, action="store_true")
   result.add_option("--warn-unused", help="Report unused rules",
@@ -1214,12 +1216,18 @@
       path = SplitPath(arg)
       paths.append(path)
 
+  # Check for --valgrind option. If enabled, we overwrite the special
+  # command flag with a command that uses the run-valgrind.py script.
+  if options.valgrind:
+    run_valgrind = join(workspace, "tools", "run-valgrind.py")
+    options.special_command = "python -u " + run_valgrind + " @"
+
   # First build the required targets
   buildspace = abspath('.')
   context = Context(workspace, buildspace, VERBOSE,
                     join(buildspace, 'shell'),
                     options.timeout,
-                    GetSpecialCommandProcessor(options.special_command), 
+                    GetSpecialCommandProcessor(options.special_command),
                     options.suppress_dialogs)
   if options.j != 1:
     options.scons_flags += ['-j', str(options.j)]
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index b83a99a..2e91d87 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -25,6 +25,18 @@
 /* End PBXAggregateTarget section */
 
 /* Begin PBXBuildFile section */
+		58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
+		58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */; };
+		58950D600F5551A300F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; };
+		58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */; };
+		58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */; };
+		58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
+		58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; };
+		58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D520F55514900F3E8BA /* register-allocator-arm.cc */; };
+		58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
+		58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */; };
+		58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; };
+		58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */; };
 		8900116C0E71CA2300F91F35 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; };
 		890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; };
 		890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */; };
@@ -38,8 +50,10 @@
 		893CCE640E71D83700357A03 /* code-stubs.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1110E719B8F00D62E90 /* code-stubs.cc */; };
 		8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
 		8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
+		894599A30F5D8729008DA8FB /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
 		89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
+		8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
 		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
 		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
 		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
@@ -186,6 +200,8 @@
 		89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */; };
 		89F23C9F0E78D604006B2466 /* simulator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF17D0E719B8F00D62E90 /* simulator-arm.cc */; };
 		89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */; };
+		9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
+		9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
@@ -248,6 +264,20 @@
 /* End PBXContainerItemProxy section */
 
 /* Begin PBXFileReference section */
+		58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-arm.cc"; sourceTree = "<group>"; };
+		58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target-ia32.cc"; sourceTree = "<group>"; };
+		58950D500F55514900F3E8BA /* jump-target.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target.cc"; sourceTree = "<group>"; };
+		58950D510F55514900F3E8BA /* jump-target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target.h"; sourceTree = "<group>"; };
+		58950D520F55514900F3E8BA /* register-allocator-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-arm.cc"; sourceTree = "<group>"; };
+		58950D530F55514900F3E8BA /* register-allocator-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator-ia32.cc"; sourceTree = "<group>"; };
+		58950D540F55514900F3E8BA /* register-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator.cc"; sourceTree = "<group>"; };
+		58950D550F55514900F3E8BA /* register-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "register-allocator.h"; sourceTree = "<group>"; };
+		58950D560F55514900F3E8BA /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-arm.cc"; sourceTree = "<group>"; };
+		58950D570F55514900F3E8BA /* virtual-frame-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-arm.h"; sourceTree = "<group>"; };
+		58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame-ia32.cc"; sourceTree = "<group>"; };
+		58950D590F55514900F3E8BA /* virtual-frame-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame-ia32.h"; sourceTree = "<group>"; };
+		58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; };
+		58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
 		8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
 		893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
 		8939880B0F2A35FA007D5254 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -258,6 +288,8 @@
 		89471C7F0EB23EE400B6874B /* flag-definitions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flag-definitions.h"; sourceTree = "<group>"; };
 		89495E460E79FC23001F68C3 /* compilation-cache.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "compilation-cache.cc"; sourceTree = "<group>"; };
 		89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
+		8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; };
+		8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; };
 		8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "codegen-ia32.h"; sourceTree = "<group>"; };
 		896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "codegen-arm.h"; sourceTree = "<group>"; };
 		8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -479,6 +511,8 @@
 		89B12E8D0E7FF2A40080BA62 /* presubmit.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = presubmit.py; sourceTree = "<group>"; };
 		89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
 		89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
+		9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
+		9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -626,6 +660,8 @@
 				898BD20D0EF6CC850068B00A /* debug-ia32.cc */,
 				897FF1280E719B8F00D62E90 /* debug.cc */,
 				897FF1290E719B8F00D62E90 /* debug.h */,
+				8956B6CD0F5D86570033B5A2 /* debug-agent.cc */,
+				8956B6CE0F5D86570033B5A2 /* debug-agent.h */,
 				897FF12A0E719B8F00D62E90 /* disasm-arm.cc */,
 				897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */,
 				897FF12C0E719B8F00D62E90 /* disasm.h */,
@@ -667,6 +703,10 @@
 				89A15C680EE4665300B48DEB /* jsregexp-inl.h */,
 				897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
 				897FF14F0E719B8F00D62E90 /* jsregexp.h */,
+				58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
+				58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */,
+				58950D500F55514900F3E8BA /* jump-target.cc */,
+				58950D510F55514900F3E8BA /* jump-target.h */,
 				897FF1500E719B8F00D62E90 /* list-inl.h */,
 				897FF1510E719B8F00D62E90 /* list.h */,
 				897FF1520E719B8F00D62E90 /* log.cc */,
@@ -687,6 +727,8 @@
 				897FF1610E719B8F00D62E90 /* objects-inl.h */,
 				897FF1620E719B8F00D62E90 /* objects.cc */,
 				897FF1630E719B8F00D62E90 /* objects.h */,
+				9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */,
+				9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */,
 				897FF1640E719B8F00D62E90 /* parser.cc */,
 				897FF1650E719B8F00D62E90 /* parser.h */,
 				89A15C6D0EE466A900B48DEB /* platform-freebsd.cc */,
@@ -712,6 +754,10 @@
 				89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
 				8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
 				8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
+				58950D520F55514900F3E8BA /* register-allocator-arm.cc */,
+				58950D530F55514900F3E8BA /* register-allocator-ia32.cc */,
+				58950D540F55514900F3E8BA /* register-allocator.cc */,
+				58950D550F55514900F3E8BA /* register-allocator.h */,
 				897FF16F0E719B8F00D62E90 /* rewriter.cc */,
 				897FF1700E719B8F00D62E90 /* rewriter.h */,
 				897FF1710E719B8F00D62E90 /* runtime.cc */,
@@ -762,6 +808,12 @@
 				897FF19E0E719B8F00D62E90 /* v8threads.h */,
 				897FF19F0E719B8F00D62E90 /* variables.cc */,
 				897FF1A00E719B8F00D62E90 /* variables.h */,
+				58950D560F55514900F3E8BA /* virtual-frame-arm.cc */,
+				58950D570F55514900F3E8BA /* virtual-frame-arm.h */,
+				58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */,
+				58950D590F55514900F3E8BA /* virtual-frame-ia32.h */,
+				58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
+				58950D5B0F55514900F3E8BA /* virtual-frame.h */,
 				897FF1A10E719B8F00D62E90 /* zone-inl.h */,
 				897FF1A20E719B8F00D62E90 /* zone.cc */,
 				897FF1A30E719B8F00D62E90 /* zone.h */,
@@ -1038,6 +1090,7 @@
 				89A88DFC0E71A6460043BA31 /* counters.cc in Sources */,
 				89A88DFD0E71A6470043BA31 /* cpu-ia32.cc in Sources */,
 				89A88DFE0E71A6480043BA31 /* dateparser.cc in Sources */,
+				8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */,
 				898BD20E0EF6CC930068B00A /* debug-ia32.cc in Sources */,
 				89A88DFF0E71A6530043BA31 /* debug.cc in Sources */,
 				89A88E000E71A6540043BA31 /* disasm-ia32.cc in Sources */,
@@ -1056,6 +1109,8 @@
 				89A88E0D0E71A66E0043BA31 /* ic.cc in Sources */,
 				89A15C850EE4678B00B48DEB /* interpreter-irregexp.cc in Sources */,
 				89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */,
+				58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */,
+				58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */,
 				8900116C0E71CA2300F91F35 /* libraries.cc in Sources */,
 				89A88E0F0E71A6740043BA31 /* log.cc in Sources */,
 				89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */,
@@ -1072,6 +1127,8 @@
 				89A15C8A0EE467D100B48DEB /* regexp-macro-assembler-tracer.cc in Sources */,
 				89A15C810EE4674900B48DEB /* regexp-macro-assembler.cc in Sources */,
 				8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */,
+				58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */,
+				58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */,
 				89A88E190E71A6970043BA31 /* rewriter.cc in Sources */,
 				89A88E1A0E71A69B0043BA31 /* runtime.cc in Sources */,
 				89A88E1B0E71A69D0043BA31 /* scanner.cc in Sources */,
@@ -1093,7 +1150,10 @@
 				89A88E2B0E71A6D10043BA31 /* v8.cc in Sources */,
 				89A88E2C0E71A6D20043BA31 /* v8threads.cc in Sources */,
 				89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */,
+				58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
+				58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */,
 				89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
+				9FC86ABD0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -1129,6 +1189,7 @@
 				89F23C4F0E78D5B2006B2466 /* counters.cc in Sources */,
 				89F23C9A0E78D5EC006B2466 /* cpu-arm.cc in Sources */,
 				89F23C510E78D5B2006B2466 /* dateparser.cc in Sources */,
+				894599A30F5D8729008DA8FB /* debug-agent.cc in Sources */,
 				898BD20F0EF6CC9A0068B00A /* debug-arm.cc in Sources */,
 				89F23C520E78D5B2006B2466 /* debug.cc in Sources */,
 				89F23C9B0E78D5EE006B2466 /* disasm-arm.cc in Sources */,
@@ -1147,6 +1208,8 @@
 				89F23C600E78D5B2006B2466 /* ic.cc in Sources */,
 				890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */,
 				89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */,
+				58950D600F5551A300F3E8BA /* jump-target.cc in Sources */,
+				58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */,
 				89F23C620E78D5B2006B2466 /* libraries.cc in Sources */,
 				89F23C630E78D5B2006B2466 /* log.cc in Sources */,
 				89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */,
@@ -1163,6 +1226,8 @@
 				890A14030EE9C4B500E49346 /* regexp-macro-assembler-tracer.cc in Sources */,
 				890A14040EE9C4B700E49346 /* regexp-macro-assembler.cc in Sources */,
 				8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */,
+				58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */,
+				58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */,
 				89F23C6D0E78D5B2006B2466 /* rewriter.cc in Sources */,
 				89F23C6E0E78D5B2006B2466 /* runtime.cc in Sources */,
 				89F23C6F0E78D5B2006B2466 /* scanner.cc in Sources */,
@@ -1185,7 +1250,10 @@
 				89F23C7F0E78D5B2006B2466 /* v8.cc in Sources */,
 				89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */,
 				89F23C810E78D5B2006B2466 /* variables.cc in Sources */,
+				58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
+				58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */,
 				89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
+				9FC86ABE0F5FEDAC00F22668 /* oprofile-agent.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 3c78db8..81e1f09 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -345,6 +345,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\debug-agent.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\debug-agent.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\debug-ia32.cc"
 				>
 			</File>
@@ -481,6 +489,18 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\jump-target.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\jump-target.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\jump-target-ia32.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\jsregexp-inl.h"
 				>
 			</File>
@@ -569,6 +589,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\oprofile-agent.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\oprofile-agent.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\parser.cc"
 				>
 			</File>
@@ -645,6 +673,18 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\register-allocator.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\register-allocator.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\register-allocator-ia32.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\rewriter.cc"
 				>
 			</File>
@@ -809,6 +849,22 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\virtual-frame.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame-ia32.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame-ia32.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\zone-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index 7eeb0d5..a03306d 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -485,6 +485,18 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\jump-target.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\jump-target.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\jump-target-arm.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\jsregexp-inl.h"
 				>
 			</File>
@@ -649,6 +661,18 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\register-allocator.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\register-allocator.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\register-allocator-arm.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\rewriter.cc"
 				>
 			</File>
@@ -821,6 +845,22 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\virtual-frame.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame-arm.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\virtual-frame-arm.cc"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\zone-inl.h"
 				>
 			</File>