Version 2.5.1

Fixed a bug causing spurious out-of-memory exceptions (issue http://crbug.com/54580).

Fixed a compilation error on the Solaris platform (issue 901).

Fixed an error in strtod (string-to-floating-point conversion) caused by glibc's use of 80-bit floats in the FPU on 32-bit Linux.
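
A minimal sketch of the failure mode, assuming an 80-bit long double as
provided by glibc on x86 (the input 89255e-22 is the example cited in the
double-conversion sources; none of this code is from the patch).  Both
89255.0 and 1e22 are exact doubles, so a single correctly rounded 64-bit
division gives the right answer, while rounding first to 80 bits and then to
64 bits can be one ulp off.  Build for x86-64 so plain double arithmetic uses
SSE; on a 32-bit x87 build both values round twice, which is the bug itself:

  #include <cstdio>

  int main() {
    double once = 89255.0 / 1e22;              // single 64-bit rounding
    long double wide = 89255.0L / 1e22L;       // 80-bit intermediate
    double twice = static_cast<double>(wide);  // second rounding, to 64 bits
    std::printf("double rounding changed the result: %s\n",
                once != twice ? "yes" : "no");
    return 0;
  }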

Adjusted randomized allocations of executable memory to have 64k granularity (issue http://crbug.com/56036).

Added support for profiling using kernel perf_events on Linux.  Added the ll_prof script to tools and the --ll-prof flag to V8.
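
For embedders, a hedged sketch of enabling the new profiler (the flag names
come from this patch; SetFlagsFromString is V8's regular flag-parsing entry
point).  --ll-prof implies --log-code and --log-snapshot-positions, and the
tools/ll_prof script later pairs v8.log with the kernel's perf_events data:

  #include <v8.h>

  // Enable low-level profiling; call this before creating any contexts.
  void EnableLowLevelProfiling() {
    static const char kFlags[] = "--ll-prof --logfile=v8.log";
    v8::V8::SetFlagsFromString(kFlags, sizeof(kFlags) - 1);
  }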


git-svn-id: http://v8.googlecode.com/svn/trunk@5675 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d58efc5..36ed4fc 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+2010-10-20: Version 2.5.1
+
+        Fixed a bug causing spurious out-of-memory exceptions
+        (issue http://crbug.com/54580).
+
+        Fixed a compilation error on the Solaris platform (issue 901).
+
+        Fixed an error in strtod (string-to-floating-point conversion)
+        caused by glibc's use of 80-bit floats in the FPU on 32-bit Linux.
+
+        Adjusted randomized allocations of executable memory to have 64k
+        granularity (issue http://crbug.com/56036).
+
+        Added support for profiling using kernel perf_events on Linux.
+        Added the ll_prof script to tools and the --ll-prof flag to V8.
+
+
 2010-10-18: Version 2.5.0
 
         Fixed bug in cache handling of lastIndex on global regexps
diff --git a/SConstruct b/SConstruct
index e33d2df..a36a96f 100644
--- a/SConstruct
+++ b/SConstruct
@@ -665,17 +665,17 @@
   'toolchain': {
     'values': ['gcc', 'msvc'],
     'default': TOOLCHAIN_GUESS,
-    'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
+    'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
   },
   'os': {
     'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
     'default': OS_GUESS,
-    'help': 'the os to build for (' + OS_GUESS + ')'
+    'help': 'the os to build for (%s)' % OS_GUESS
   },
   'arch': {
     'values':['arm', 'ia32', 'x64', 'mips'],
     'default': ARCH_GUESS,
-    'help': 'the architecture to build for (' + ARCH_GUESS + ')'
+    'help': 'the architecture to build for (%s)' % ARCH_GUESS
   },
   'regexp': {
     'values': ['native', 'interpreted'],
diff --git a/src/SConscript b/src/SConscript
index e22ed11..8995d48 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -116,7 +116,6 @@
     variables.cc
     version.cc
     virtual-frame.cc
-    vm-state.cc
     zone.cc
     """),
   'arch:arm': Split("""
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c9ef29a..b3b0766 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -935,11 +935,8 @@
     __ orr(r2, r1, r0);
     __ tst(r2, Operand(kSmiTagMask));
     __ b(ne, &not_two_smis);
-    __ sub(r0, r1, r0, SetCC);
-    __ b(vc, &smi_done);
-    // Correct the sign in case of overflow.
-    __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
-    __ bind(&smi_done);
+    __ mov(r1, Operand(r1, ASR, 1));
+    __ sub(r0, r1, Operand(r0, ASR, 1));
     __ Ret();
     __ bind(&not_two_smis);
   } else if (FLAG_debug_code) {
@@ -2300,13 +2297,7 @@
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Do tail-call to runtime routine.  Runtime routines expect at least one
-  // argument, so give it a Smi.
-  __ mov(r0, Operand(Smi::FromInt(0)));
-  __ push(r0);
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-
-  __ Ret();
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
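
The first hunk above replaces the overflow-correcting smi comparison with a
shorter sequence: untag both operands (ASR #1), then subtract.  A plain C++
sketch of why no overflow fix-up is needed (illustrative, not V8 code):

  #include <stdint.h>

  // Tagged smis are 31-bit values shifted left one bit (tag bit 0), so the
  // untagged operands lie in [-2^30, 2^30).  Their difference always fits
  // in 32 bits, and its sign orders the two smis.
  int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
    return (lhs_tagged >> 1) - (rhs_tagged >> 1);
  }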
 
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 8f45886..37bb1f0 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -142,7 +142,6 @@
 
 void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ add(current_input_offset(),
            current_input_offset(), Operand(by * char_size()));
   }
@@ -927,6 +926,19 @@
 }
 
 
+void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  __ cmp(current_input_offset(), Operand(-by * char_size()));
+  __ b(ge, &after_position);
+  __ mov(current_input_offset(), Operand(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+
 void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(r0, Operand(to));
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 93a74d7..4e09f67 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -100,6 +100,7 @@
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/ast.cc b/src/ast.cc
index f47dffd..92f1496 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -398,39 +398,70 @@
 }
 
 
-bool RegExpAssertion::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtStart() {
   return type() == RegExpAssertion::START_OF_INPUT;
 }
 
 
-bool RegExpAlternative::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtEnd() {
+  return type() == RegExpAssertion::END_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtStart() {
   ZoneList<RegExpTree*>* nodes = this->nodes();
   for (int i = 0; i < nodes->length(); i++) {
     RegExpTree* node = nodes->at(i);
-    if (node->IsAnchored()) { return true; }
+    if (node->IsAnchoredAtStart()) { return true; }
     if (node->max_match() > 0) { return false; }
   }
   return false;
 }
 
 
-bool RegExpDisjunction::IsAnchored() {
+bool RegExpAlternative::IsAnchoredAtEnd() {
+  ZoneList<RegExpTree*>* nodes = this->nodes();
+  for (int i = nodes->length() - 1; i >= 0; i--) {
+    RegExpTree* node = nodes->at(i);
+    if (node->IsAnchoredAtEnd()) { return true; }
+    if (node->max_match() > 0) { return false; }
+  }
+  return false;
+}
+
+
+bool RegExpDisjunction::IsAnchoredAtStart() {
   ZoneList<RegExpTree*>* alternatives = this->alternatives();
   for (int i = 0; i < alternatives->length(); i++) {
-    if (!alternatives->at(i)->IsAnchored())
+    if (!alternatives->at(i)->IsAnchoredAtStart())
       return false;
   }
   return true;
 }
 
 
-bool RegExpLookahead::IsAnchored() {
-  return is_positive() && body()->IsAnchored();
+bool RegExpDisjunction::IsAnchoredAtEnd() {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  for (int i = 0; i < alternatives->length(); i++) {
+    if (!alternatives->at(i)->IsAnchoredAtEnd())
+      return false;
+  }
+  return true;
 }
 
 
-bool RegExpCapture::IsAnchored() {
-  return body()->IsAnchored();
+bool RegExpLookahead::IsAnchoredAtStart() {
+  return is_positive() && body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtStart() {
+  return body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtEnd() {
+  return body()->IsAnchoredAtEnd();
 }
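
Splitting IsAnchored into start and end variants lets the regexp compiler
reason about each end of a pattern independently.  Illustrative cases under
the semantics defined above (hypothetical examples, not tests from the
patch):

  // /^abc/       IsAnchoredAtStart() == true
  // /abc$/       IsAnchoredAtEnd()   == true
  // /^ab$|^cd$/  both true: every alternative is anchored at both ends
  // /a^bc/       neither: 'a' can consume a character before the assertion
  // /(?=^a)bc/   IsAnchoredAtStart() == true (positive lookahead only)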
 
 
diff --git a/src/ast.h b/src/ast.h
index e8d54e4..a01e48d 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1523,7 +1523,8 @@
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) = 0;
   virtual bool IsTextElement() { return false; }
-  virtual bool IsAnchored() { return false; }
+  virtual bool IsAnchoredAtStart() { return false; }
+  virtual bool IsAnchoredAtEnd() { return false; }
   virtual int min_match() = 0;
   virtual int max_match() = 0;
   // Returns the interval of registers used for captures within this
@@ -1548,7 +1549,8 @@
   virtual RegExpDisjunction* AsDisjunction();
   virtual Interval CaptureRegisters();
   virtual bool IsDisjunction();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return min_match_; }
   virtual int max_match() { return max_match_; }
   ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
@@ -1568,7 +1570,8 @@
   virtual RegExpAlternative* AsAlternative();
   virtual Interval CaptureRegisters();
   virtual bool IsAlternative();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return min_match_; }
   virtual int max_match() { return max_match_; }
   ZoneList<RegExpTree*>* nodes() { return nodes_; }
@@ -1595,7 +1598,8 @@
                              RegExpNode* on_success);
   virtual RegExpAssertion* AsAssertion();
   virtual bool IsAssertion();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
   Type type() { return type_; }
@@ -1768,7 +1772,8 @@
                             RegExpCompiler* compiler,
                             RegExpNode* on_success);
   virtual RegExpCapture* AsCapture();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual Interval CaptureRegisters();
   virtual bool IsCapture();
   virtual int min_match() { return body_->min_match(); }
@@ -1800,7 +1805,7 @@
   virtual RegExpLookahead* AsLookahead();
   virtual Interval CaptureRegisters();
   virtual bool IsLookahead();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
   RegExpTree* body() { return body_; }
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index aa8d8e5..d7491e1 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1814,6 +1814,11 @@
     i::Counters::contexts_created_from_scratch.Increment();
   }
 
+  // Add this context to the weak list of global contexts.
+  (*global_context_)->set(Context::NEXT_CONTEXT_LINK,
+                          Heap::global_contexts_list());
+  Heap::set_global_contexts_list(*global_context_);
+
   result_ = global_context_;
 }
 
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index bcb34c8..93218ea 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -88,7 +88,8 @@
 V(CHECK_AT_START,    44, 8)   /* bc8 pad24 addr32                           */ \
 V(CHECK_NOT_AT_START, 45, 8)  /* bc8 pad24 addr32                           */ \
 V(CHECK_GREEDY,      46, 8)   /* bc8 pad24 addr32                           */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32                        */
+V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32                        */ \
+V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24                        */
 
 #define DECLARE_BYTECODES(name, code, length) \
   static const int BC_##name = code;
diff --git a/src/contexts.h b/src/contexts.h
index 78dda6a..9722a93 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -225,7 +225,15 @@
     OUT_OF_MEMORY_INDEX,
     MAP_CACHE_INDEX,
     CONTEXT_DATA_INDEX,
-    GLOBAL_CONTEXT_SLOTS
+
+    // Properties from here are treated as weak references by the full GC.
+    // Scavenge treats them as strong references.
+    NEXT_CONTEXT_LINK,
+
+    // Total number of slots.
+    GLOBAL_CONTEXT_SLOTS,
+
+    FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
   };
 
   // Direct slot access.
@@ -333,6 +341,17 @@
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
   }
 
+  static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
+
+  // GC support.
+  typedef FixedBodyDescriptor<
+      kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
+
+  typedef FixedBodyDescriptor<
+      kHeaderSize,
+      kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
+      kSize> MarkCompactBodyDescriptor;
+
  private:
   // Unchecked access to the slots.
   Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
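
The two body descriptors added above encode the strong/weak split declared in
the enum: FixedBodyDescriptor<start, end, size> visits the pointer slots in
[start, end), so the scavenger sees the whole object (the context link stays
strong across minor GCs) while mark-compact stops short of the weak tail:

  // ScavengeBodyDescriptor:    visits [kHeaderSize, kSize); every slot,
  //                            including NEXT_CONTEXT_LINK, is strong.
  // MarkCompactBodyDescriptor: visits [kHeaderSize, kHeaderSize +
  //                            FIRST_WEAK_SLOT * kPointerSize); the link is
  //                            skipped, so dead contexts can be pruned.
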
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index acf3349..da19a45 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -188,6 +188,20 @@
 }
 
 
+void ProfilerEventsProcessor::ProcessMovedFunctions() {
+  for (int i = 0; i < moved_functions_.length(); ++i) {
+    JSFunction* function = moved_functions_[i];
+    CpuProfiler::FunctionCreateEvent(function);
+  }
+  moved_functions_.Clear();
+}
+
+
+void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
+  moved_functions_.Add(function);
+}
+
+
 void ProfilerEventsProcessor::RegExpCodeCreateEvent(
     Logger::LogEventsAndTags tag,
     const char* prefix,
@@ -426,8 +440,12 @@
 }
 
 
-void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
-                                              HeapObject* source) {
+void CpuProfiler::ProcessMovedFunctions() {
+  singleton_->processor_->ProcessMovedFunctions();
+}
+
+
+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
   // This function is called from GC iterators (during Scavenge,
   // MC, and MS), so marking bits can be set on objects. That's
   // why unchecked accessors are used here.
@@ -436,27 +454,7 @@
   if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
       || singleton_->processor_->IsKnownFunction(function->address())) return;
 
-  int security_token_id = TokenEnumerator::kNoSecurityToken;
-  // In debug mode, assertions may fail for contexts,
-  // and we can live without security tokens in debug mode.
-#ifndef DEBUG
-  if (function->unchecked_context()->IsContext()) {
-    security_token_id = singleton_->token_enumerator_->GetTokenId(
-        function->context()->global_context()->security_token());
-  }
-  // Security token may not be moved yet.
-  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
-    JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
-    if (old_function->unchecked_context()->IsContext()) {
-      security_token_id = singleton_->token_enumerator_->GetTokenId(
-          old_function->context()->global_context()->security_token());
-    }
-  }
-#endif
-  singleton_->processor_->FunctionCreateEvent(
-      function->address(),
-      function->unchecked_code()->address(),
-      security_token_id);
+  singleton_->processor_->RememberMovedFunction(function);
 }
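
The deleted branch read the moved function's context during GC to recover a
security token, but, as its own comment conceded, the context may not have
been moved yet at that point.  The replacement defers the work: moved
functions are merely recorded while the heap is in flux, and the creation
events are replayed by ProcessMovedFunctions once the collection finishes
(see the call added in heap.cc below), when contexts can be dereferenced
safely again.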
 
 
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 86f9f67..d3158d7 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -165,6 +165,8 @@
   // Puts current stack into tick sample events buffer.
   void AddCurrentStack();
   bool IsKnownFunction(Address start);
+  void ProcessMovedFunctions();
+  void RememberMovedFunction(JSFunction* function);
 
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
@@ -202,6 +204,7 @@
 
   // Used from the VM thread.
   HashMap* known_functions_;
+  List<JSFunction*> moved_functions_;
 };
 
 } }  // namespace v8::internal
@@ -251,17 +254,18 @@
                               String* source, int line);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
+  static void CodeMovingGCEvent() {}
   static void CodeMoveEvent(Address from, Address to);
   static void CodeDeleteEvent(Address from);
   static void FunctionCreateEvent(JSFunction* function);
   // Reports function creation in case we had missed it (e.g.
   // if it was created from compiled code).
-  static void FunctionCreateEventFromMove(JSFunction* function,
-                                          HeapObject* source);
+  static void FunctionCreateEventFromMove(JSFunction* function);
   static void FunctionMoveEvent(Address from, Address to);
   static void FunctionDeleteEvent(Address from);
   static void GetterCallbackEvent(String* name, Address entry_point);
   static void RegExpCodeCreateEvent(Code* code, String* source);
+  static void ProcessMovedFunctions();
   static void SetterCallbackEvent(String* name, Address entry_point);
 
   static INLINE(bool is_profiling()) {
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 84a0eaa..2474c62 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -412,6 +412,7 @@
             "Update sliding state window counters.")
 DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
 DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
+DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
 
 //
 // Heap protection flags
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 27a14bc..104292d 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -76,7 +76,7 @@
   if (FLAG_gc_interval >= 0 &&
       !disallow_allocation_failure_ &&
       Heap::allocation_timeout_-- <= 0) {
-    return Failure::RetryAfterGC(size_in_bytes, space);
+    return Failure::RetryAfterGC(space);
   }
   Counters::objs_since_last_full.Increment();
   Counters::objs_since_last_young.Increment();
@@ -389,8 +389,12 @@
 }
 
 
+#ifdef DEBUG
 #define GC_GREEDY_CHECK() \
-  ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
+  if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck()
+#else
+#define GC_GREEDY_CHECK() { }
+#endif
 
 
 // Calls the FUNCTION_CALL function and retries it up to three times
@@ -409,8 +413,7 @@
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
     }                                                                     \
     if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
-    Heap::CollectGarbage(Failure::cast(__object__)->requested(),          \
-                         Failure::cast(__object__)->allocation_space());  \
+    Heap::CollectGarbage(Failure::cast(__object__)->allocation_space());  \
     __object__ = FUNCTION_CALL;                                           \
     if (!__object__->IsFailure()) RETURN_VALUE;                           \
     if (__object__->IsOutOfMemoryFailure()) {                             \
diff --git a/src/heap.cc b/src/heap.cc
index 23bfbd8..675639a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -54,6 +54,7 @@
 
 String* Heap::hidden_symbol_;
 Object* Heap::roots_[Heap::kRootListLength];
+Object* Heap::global_contexts_list_;
 
 NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;
@@ -420,7 +421,7 @@
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   MarkCompactCollector::SetForceCompaction(force_compaction);
-  CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
+  CollectGarbage(OLD_POINTER_SPACE, collectionPolicy);
   MarkCompactCollector::SetForceCompaction(false);
 }
 
@@ -431,8 +432,7 @@
 }
 
 
-bool Heap::CollectGarbage(int requested_size,
-                          AllocationSpace space,
+void Heap::CollectGarbage(AllocationSpace space,
                           CollectionPolicy collectionPolicy) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
@@ -469,25 +469,8 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) HeapProfiler::WriteSample();
+  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
 #endif
-
-  switch (space) {
-    case NEW_SPACE:
-      return new_space_.Available() >= requested_size;
-    case OLD_POINTER_SPACE:
-      return old_pointer_space_->Available() >= requested_size;
-    case OLD_DATA_SPACE:
-      return old_data_space_->Available() >= requested_size;
-    case CODE_SPACE:
-      return code_space_->Available() >= requested_size;
-    case MAP_SPACE:
-      return map_space_->Available() >= requested_size;
-    case CELL_SPACE:
-      return cell_space_->Available() >= requested_size;
-    case LO_SPACE:
-      return lo_space_->Available() >= requested_size;
-  }
-  return false;
 }
 
 
@@ -542,27 +525,27 @@
   while (gc_performed) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(new_space_size, NEW_SPACE);
+      Heap::CollectGarbage(NEW_SPACE);
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+      Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+      Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      Heap::CollectGarbage(code_space_size, CODE_SPACE);
+      Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      Heap::CollectGarbage(map_space_size, MAP_SPACE);
+      Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+      Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
@@ -574,7 +557,7 @@
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      Heap::CollectGarbage(large_object_size, LO_SPACE);
+      Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
   }
@@ -624,19 +607,14 @@
 }
 
 
-class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
-  virtual void VisitThread(ThreadLocalTop* top) {
-    Context* context = top->context_;
-    if (context == NULL) return;
-    context->global()->global_context()->normalized_map_cache()->Clear();
-  }
-};
-
-
 void Heap::ClearNormalizedMapCaches() {
   if (Bootstrapper::IsActive()) return;
-  ClearThreadNormalizedMapCachesVisitor visitor;
-  ThreadManager::IterateArchivedThreads(&visitor);
+
+  Object* context = global_contexts_list_;
+  while (!context->IsUndefined()) {
+    Context::cast(context)->normalized_map_cache()->Clear();
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
 }
 
 
@@ -685,6 +663,10 @@
 void Heap::PerformGarbageCollection(GarbageCollector collector,
                                     GCTracer* tracer,
                                     CollectionPolicy collectionPolicy) {
+  if (collector != SCAVENGER) {
+    PROFILE(CodeMovingGCEvent());
+  }
+
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
@@ -1034,6 +1016,9 @@
     }
   }
 
+  // Scavenge object reachable from the global contexts list directly.
+  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1101,6 +1086,44 @@
 }
 
 
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+  Object* head = undefined_value();
+  Context* tail = NULL;
+  Object* candidate = global_contexts_list_;
+  while (!candidate->IsUndefined()) {
+    // Check whether to keep the candidate in the list.
+    Context* candidate_context = reinterpret_cast<Context*>(candidate);
+    Object* retain = retainer->RetainAs(candidate);
+    if (retain != NULL) {
+      if (head->IsUndefined()) {
+        // First element in the list.
+        head = candidate_context;
+      } else {
+        // Subsequent elements in the list.
+        ASSERT(tail != NULL);
+        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+                            candidate_context,
+                            UPDATE_WRITE_BARRIER);
+      }
+      // Retained context is new tail.
+      tail = candidate_context;
+    }
+    // Move to next element in the list.
+    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
+  }
+
+  // Terminate the list if there is one or more elements.
+  if (tail != NULL) {
+    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+                        Heap::undefined_value(),
+                        UPDATE_WRITE_BARRIER);
+  }
+
+  // Update the head of the list of contexts.
+  Heap::global_contexts_list_ = head;
+}
+
+
 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
  public:
   static inline void VisitPointer(Object** p) {
@@ -1157,6 +1180,9 @@
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitGlobalContext,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        VisitSpecialized<Context::kSize>);
 
     typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
 
@@ -1235,7 +1261,7 @@
     if (Logger::is_logging() || CpuProfiler::is_profiling()) {
       if (target->IsJSFunction()) {
         PROFILE(FunctionMoveEvent(source->address(), target->address()));
-        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
       }
     }
 #endif
@@ -1647,7 +1673,9 @@
 
   obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
   if (obj->IsFailure()) return false;
-  set_global_context_map(Map::cast(obj));
+  Map* global_context_map = Map::cast(obj);
+  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
+  set_global_context_map(global_context_map);
 
   obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                     SharedFunctionInfo::kAlignedSize);
@@ -3431,7 +3459,7 @@
       HistogramTimerScope scope(&Counters::gc_context);
       CollectAllGarbage(false);
     } else {
-      CollectGarbage(0, NEW_SPACE);
+      CollectGarbage(NEW_SPACE);
     }
     new_space_.Shrink();
     last_gc_count = gc_count_;
@@ -4236,6 +4264,8 @@
 
     // Create initial objects
     if (!CreateInitialObjects()) return false;
+
+    global_contexts_list_ = undefined_value();
   }
 
   LOG(IntPtrTEvent("heap-capacity", Capacity()));
@@ -4937,11 +4967,11 @@
 
 
 #ifdef DEBUG
-bool Heap::GarbageCollectionGreedyCheck() {
+void Heap::GarbageCollectionGreedyCheck() {
   ASSERT(FLAG_gc_greedy);
-  if (Bootstrapper::IsActive()) return true;
-  if (disallow_allocation_failure()) return true;
-  return CollectGarbage(0, NEW_SPACE);
+  if (Bootstrapper::IsActive()) return;
+  if (disallow_allocation_failure()) return;
+  CollectGarbage(NEW_SPACE);
 }
 #endif
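
ProcessWeakReferences above is a standard prune-an-intrusive-weak-list loop,
and the same shape works for anything threaded through a hidden slot.  A
self-contained sketch with plain C++ stand-ins for the heap types (a boolean
mark plays the role of WeakObjectRetainer::RetainAs):

  #include <cstddef>

  struct Node { Node* next; bool marked; };

  // Keep the nodes that survived marking, relink them, terminate the tail.
  Node* PruneWeakList(Node* head) {
    Node* new_head = NULL;
    Node* tail = NULL;
    for (Node* n = head; n != NULL; n = n->next) {
      if (!n->marked) continue;       // dead: drop it from the list
      if (tail == NULL) {
        new_head = n;                 // first survivor becomes the new head
      } else {
        tail->next = n;               // link it after the previous survivor
      }
      tail = n;
    }
    if (tail != NULL) tail->next = NULL;
    return new_head;
  }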
 
diff --git a/src/heap.h b/src/heap.h
index b1ef19f..6d32a4b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -202,9 +202,10 @@
   V(closure_symbol, "(closure)")
 
 
-// Forward declaration of the GCTracer class.
+// Forward declarations.
 class GCTracer;
 class HeapStats;
+class WeakObjectRetainer;
 
 
 typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
@@ -696,8 +697,7 @@
 
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
-  static bool CollectGarbage(int required_space,
-                             AllocationSpace space,
+  static void CollectGarbage(AllocationSpace space,
                              CollectionPolicy collectionPolicy = NORMAL);
 
   // Performs a full garbage collection. Force compaction if the
@@ -717,7 +717,7 @@
 
 #ifdef DEBUG
   // Utility used with flag gc-greedy.
-  static bool GarbageCollectionGreedyCheck();
+  static void GarbageCollectionGreedyCheck();
 #endif
 
   static void AddGCPrologueCallback(
@@ -767,6 +767,11 @@
   // not match the empty string.
   static String* hidden_symbol() { return hidden_symbol_; }
 
+  static void set_global_contexts_list(Object* object) {
+    global_contexts_list_ = object;
+  }
+  static Object* global_contexts_list() { return global_contexts_list_; }
+
   // Iterates over all roots in the heap.
   static void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
@@ -870,6 +875,11 @@
   // Generated code can embed this address to get access to the roots.
   static Object** roots_address() { return roots_; }
 
+  // Get address of global contexts list for serialization support.
+  static Object** global_contexts_list_address() {
+    return &global_contexts_list_;
+  }
+
 #ifdef DEBUG
   static void Print();
   static void PrintHandles();
@@ -1051,6 +1061,8 @@
   static void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  static void ProcessWeakReferences(WeakObjectRetainer* retainer);
+
   // Helper function that governs the promotion policy from new space to
   // old.  If the object's old address lies below the new space's age
   // mark or if we've already filled the bottom 1/16th of the to space,
@@ -1157,6 +1169,8 @@
 
   static Object* roots_[kRootListLength];
 
+  static Object* global_contexts_list_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -2043,6 +2057,19 @@
   static List<Object*> old_space_strings_;
 };
 
+
+// Abstract base class for checking whether a weak object should be retained.
+class WeakObjectRetainer {
+ public:
+  virtual ~WeakObjectRetainer() {}
+
+  // Return whether this object should be retained. If NULL is returned the
+  // object has no references. Otherwise the address of the retained object
+  // should be returned as in some GC situations the object has been moved.
+  virtual Object* RetainAs(Object* object) = 0;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_H_
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3e2b7ae..348bb14 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2638,7 +2638,7 @@
     __ j(not_zero, &non_smi, not_taken);
     __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done);
-    __ neg(edx);  // Correct sign in case of overflow.
+    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
     __ mov(eax, edx);
     __ ret(0);
@@ -2964,16 +2964,7 @@
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Because builtins always remove the receiver from the stack, we
-  // have to fake one to avoid underflowing the stack. The receiver
-  // must be inserted below the return address on the stack so we
-  // temporarily store that in a register.
-  __ pop(eax);
-  __ push(Immediate(Smi::FromInt(0)));
-  __ push(eax);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
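
The neg-to-not change in the first hunk above fixes the one input the old
sign correction got wrong.  When the smi subtraction overflows, the wrapped
difference can be kMinInt, and neg(kMinInt) is kMinInt again, so the sign
stayed wrong.  The caller only looks at the sign, and on the overflow path
the wrapped value is always <= -2 or >= 2, so bitwise NOT (~x == -x - 1)
flips the sign of every possible input and never produces 0:

  #include <stdint.h>

  // x is the wrapped difference of two smis after signed overflow.
  int32_t CorrectSign(int32_t x) {
    return ~x;  // nonzero, sign opposite to x, works even for INT32_MIN
  }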
 
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 2aab7a8..e2853e8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -133,7 +133,6 @@
 
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ add(Operand(edi), Immediate(by * char_size()));
   }
 }
@@ -964,6 +963,17 @@
   __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
 }
 
+void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by)  {
+  NearLabel after_position;
+  __ cmp(edi, -by * char_size());
+  __ j(greater_equal, &after_position);
+  __ mov(edi, -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
 
 void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8b8eeed..51e2cb0 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -98,6 +98,7 @@
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index a904447..c9c3cc4 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -607,6 +607,15 @@
           pc = code_base + Load32Aligned(pc + 4);
         }
         break;
+      BYTECODE(SET_CURRENT_POSITION_FROM_END) {
+        int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
+        if (subject.length() - current > by) {
+          current = subject.length() - by;
+          current_char = subject[current - 1];
+        }
+        pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
+        break;
+      }
       default:
         UNREACHABLE();
         break;
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 82a370f..3c5ddfb 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -5180,7 +5180,10 @@
                                                     &compiler,
                                                     compiler.accept());
   RegExpNode* node = captured_body;
-  if (!data->tree->IsAnchored()) {
+  bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+  bool is_start_anchored = data->tree->IsAnchoredAtStart();
+  int max_length = data->tree->max_match();
+  if (!is_start_anchored) {
     // Add a .*? at the beginning, outside the body capture, unless
     // this expression is anchored at the beginning.
     RegExpNode* loop_node =
@@ -5236,6 +5239,15 @@
   RegExpMacroAssemblerIrregexp macro_assembler(codes);
 #endif  // V8_INTERPRETED_REGEXP
 
+  // Inserted here, instead of in Assembler, because it depends on information
+  // in the AST that isn't replicated in the Node structure.
+  static const int kMaxBacksearchLimit = 1024;
+  if (is_end_anchored &&
+      !is_start_anchored &&
+      max_length < kMaxBacksearchLimit) {
+    macro_assembler.SetCurrentPositionFromEnd(max_length);
+  }
+
   return compiler.Assemble(&macro_assembler,
                            node,
                            data->capture_count,
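
The kMaxBacksearchLimit block above is the payoff of the new anchoring
predicates: a pattern anchored at the end but not at the start (for example
/foo$/) can only begin matching within max_match() characters of the end of
the subject, so the generated code jumps straight to that region instead of
retrying from every position.  The limit restricts the optimization to
patterns with a usefully small bound; something like /a*$/ has an
effectively unbounded max_match() and would gain nothing.  An illustrative
helper (not part of the patch):

  // Earliest subject position worth trying for an end-anchored pattern.
  int FirstUsefulPosition(int subject_length, int max_match) {
    int start = subject_length - max_match;
    return start < 0 ? 0 : start;
  }
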
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 62f0ca6..d6d8754 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -122,6 +122,7 @@
 bool Log::is_stopped_ = false;
 Log::WritePtr Log::Write = NULL;
 FILE* Log::output_handle_ = NULL;
+FILE* Log::output_code_handle_ = NULL;
 LogDynamicBuffer* Log::output_buffer_ = NULL;
 // Must be the same message as in Logger::PauseProfiler.
 const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
@@ -143,9 +144,22 @@
 }
 
 
+static const char kCodeLogExt[] = ".code";
+
+
 void Log::OpenFile(const char* name) {
   ASSERT(!IsEnabled());
   output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+  if (FLAG_ll_prof) {
+    // Open a file for logging the contents of code objects so that
+    // they can be disassembled later.
+    size_t name_len = strlen(name);
+    ScopedVector<char> code_name(
+        static_cast<int>(name_len + sizeof(kCodeLogExt)));
+    memcpy(code_name.start(), name, name_len);
+    memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
+    output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+  }
   Write = WriteToFile;
   Init();
 }
@@ -165,6 +179,8 @@
   if (Write == WriteToFile) {
     if (output_handle_ != NULL) fclose(output_handle_);
     output_handle_ = NULL;
+    if (output_code_handle_ != NULL) fclose(output_code_handle_);
+    output_code_handle_ = NULL;
   } else if (Write == WriteToMemory) {
     delete output_buffer_;
     output_buffer_ = NULL;
diff --git a/src/log-utils.h b/src/log-utils.h
index a4dde21..ffea928 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -152,6 +152,9 @@
   // mutex_ should be acquired before using output_handle_ or output_buffer_.
   static FILE* output_handle_;
 
+  // Used when low-level profiling is active to save code object contents.
+  static FILE* output_code_handle_;
+
   static LogDynamicBuffer* output_buffer_;
 
   // Size of dynamic buffer block (and dynamic buffer initial size).
@@ -171,6 +174,7 @@
   // mutex_ should be acquired before using it.
   static char* message_buffer_;
 
+  friend class Logger;
   friend class LogMessageBuilder;
   friend class LogRecordCompressor;
 };
diff --git a/src/log.cc b/src/log.cc
index 4230cba..1b0fdeb 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -191,11 +191,12 @@
 
   ~Ticker() { if (IsActive()) Stop(); }
 
-  void SampleStack(TickSample* sample) {
+  virtual void SampleStack(TickSample* sample) {
+    ASSERT(IsSynchronous());
     StackTracer::Trace(sample);
   }
 
-  void Tick(TickSample* sample) {
+  virtual void Tick(TickSample* sample) {
     if (profiler_) profiler_->Insert(sample);
     if (window_) window_->AddState(sample->state);
   }
@@ -765,6 +766,7 @@
     msg.Append(*p);
   }
   msg.Append('"');
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -784,6 +786,7 @@
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -808,6 +811,7 @@
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"%s %s:%d\"",
              code->ExecutableSize(), *str, *sourcestr, line);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -825,6 +829,7 @@
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -835,6 +840,17 @@
 }
 
 
+void Logger::CodeMovingGCEvent() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+  LogMessageBuilder msg;
+  msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
+  msg.WriteToLogFile();
+  OS::SignalCodeMovingGC();
+#endif
+}
+
+
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
@@ -845,6 +861,7 @@
   msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append('\"');
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;
@@ -909,8 +926,7 @@
 }
 
 
-void Logger::FunctionCreateEventFromMove(JSFunction* function,
-                                         HeapObject*) {
+void Logger::FunctionCreateEventFromMove(JSFunction* function) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
     FunctionCreateEvent(function);
@@ -1340,6 +1356,34 @@
 }
 
 
+void Logger::LogCodeInfo() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+#if V8_TARGET_ARCH_IA32
+  const char arch[] = "ia32";
+#elif V8_TARGET_ARCH_X64
+  const char arch[] = "x64";
+#elif V8_TARGET_ARCH_ARM
+  const char arch[] = "arm";
+#else
+  const char arch[] = "unknown";
+#endif
+  LogMessageBuilder msg;
+  msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
+  msg.WriteToLogFile();
+#endif  // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
+  if (!FLAG_ll_prof || Log::output_code_handle_ == NULL) return;
+  int pos = static_cast<int>(ftell(Log::output_code_handle_));
+  fwrite(code->instruction_start(), 1, code->instruction_size(),
+         Log::output_code_handle_);
+  msg->Append(",%d", pos);
+}
+
+
 void Logger::LogCodeObjects() {
   AssertNoAllocation no_alloc;
   HeapIterator iterator;
@@ -1451,6 +1495,12 @@
   // --prof implies --log-code.
   if (FLAG_prof) FLAG_log_code = true;
 
+  // --ll-prof implies --log-code and --log-snapshot-positions.
+  if (FLAG_ll_prof) {
+    FLAG_log_code = true;
+    FLAG_log_snapshot_positions = true;
+  }
+
   // --prof_lazy controls --log-code, implies --noprof_auto.
   if (FLAG_prof_lazy) {
     FLAG_log_code = false;
@@ -1512,6 +1562,8 @@
 
   ASSERT(VMState::is_outermost_external());
 
+  if (FLAG_ll_prof) LogCodeInfo();
+
   ticker_ = new Ticker(kSamplingIntervalMs);
 
   if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
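
With --ll-prof active, LowLevelCodeCreateEvent above appends each code
object's raw instruction bytes to the companion <logfile>.code file opened in
log-utils.cc, and adds one trailing field to the code-creation log line: the
byte offset of those instructions within the .code file.  A consumer such as
ll_prof can seek to that offset and disassemble.  A hypothetical reader,
names assumed:

  #include <cstdio>
  #include <vector>

  // Fetch the instruction bytes that a code-creation line points at.
  std::vector<unsigned char> ReadCodeBlob(const char* code_file_path,
                                          long offset, size_t size) {
    std::vector<unsigned char> bytes(size);
    FILE* f = std::fopen(code_file_path, "rb");
    if (f != NULL && size > 0) {
      std::fseek(f, offset, SEEK_SET);
      if (std::fread(&bytes[0], 1, size, f) != size) bytes.clear();
    }
    if (f != NULL) std::fclose(f);
    return bytes;
  }
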
diff --git a/src/log.h b/src/log.h
index e513737..3a4d79b 100644
--- a/src/log.h
+++ b/src/log.h
@@ -91,6 +91,7 @@
   V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
   V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
   V(CODE_DELETE_EVENT,              "code-delete",            "cd")       \
+  V(CODE_MOVING_GC,                 "code-moving-gc",         "cg")       \
   V(FUNCTION_CREATION_EVENT,        "function-creation",      "fc")       \
   V(FUNCTION_MOVE_EVENT,            "function-move",          "fm")       \
   V(FUNCTION_DELETE_EVENT,          "function-delete",        "fd")       \
@@ -209,6 +210,7 @@
   static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
                               String* source, int line);
   static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+  static void CodeMovingGCEvent();
   // Emits a code create event for a RegExp.
   static void RegExpCodeCreateEvent(Code* code, String* source);
   // Emits a code move event.
@@ -217,8 +219,7 @@
   static void CodeDeleteEvent(Address from);
   // Emits a function object create event.
   static void FunctionCreateEvent(JSFunction* function);
-  static void FunctionCreateEventFromMove(JSFunction* function,
-                                          HeapObject*);
+  static void FunctionCreateEventFromMove(JSFunction* function);
   // Emits a function move event.
   static void FunctionMoveEvent(Address from, Address to);
   // Emits a function delete event.
@@ -317,6 +318,12 @@
   // Used for logging stubs found in the snapshot.
   static void LogCodeObject(Object* code_object);
 
+  // Emits general information about generated code.
+  static void LogCodeInfo();
+
+  // Handles code creation when low-level profiling is active.
+  static void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+
   // Emits a profiler tick event. Used by the profiler thread.
   static void TickEvent(TickSample* sample, bool overflow);
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 26f88cf..ad928ea 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -282,6 +282,11 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticMarkingVisitor,
+                                      Context::MarkCompactBodyDescriptor,
+                                      void>::Visit);
+
     table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
 
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -578,6 +583,7 @@
     VisitPointers(SLOT_ADDR(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
                   SLOT_ADDR(object, JSFunction::kSize));
+
 #undef SLOT_ADDR
   }
 
@@ -738,6 +744,21 @@
 };
 
 
+// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
+// are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    MapWord first_word = HeapObject::cast(object)->map_word();
+    if (first_word.IsMarked()) {
+      return object;
+    } else {
+      return NULL;
+    }
+  }
+};
+
+
 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
   ASSERT(!object->IsMarked());
   ASSERT(Heap::Contains(object));
@@ -1069,6 +1090,10 @@
   ExternalStringTable::Iterate(&v);
   ExternalStringTable::CleanUp();
 
+  // Process the weak references.
+  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+  Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
 }
@@ -1639,6 +1664,9 @@
     }
   }
 
+  // Update pointer from the global contexts list.
+  updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+
   // Update pointers from external string table.
   Heap::UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -2245,6 +2273,9 @@
   Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
   GlobalHandles::IterateWeakRoots(&updating_visitor);
 
+  // Update the pointer to the head of the weak list of global contexts.
+  updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+
   int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                           &UpdatePointersInOldObject);
   int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
@@ -2522,7 +2553,7 @@
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
-    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
 
@@ -2615,7 +2646,7 @@
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
-    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f63d672..11f9d34 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -844,15 +844,6 @@
 }
 
 
-int Failure::requested() const {
-  const int kShiftBits =
-      kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
-  STATIC_ASSERT(kShiftBits >= 0);
-  ASSERT(type() == RETRY_AFTER_GC);
-  return static_cast<int>(value() >> kShiftBits);
-}
-
-
 AllocationSpace Failure::allocation_space() const {
   ASSERT_EQ(RETRY_AFTER_GC, type());
   return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
@@ -881,20 +872,14 @@
 }
 
 
-Failure* Failure::RetryAfterGC(int requested_bytes) {
-  // Assert that the space encoding fits in the three bytes allotted for it.
-  ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
-  uintptr_t requested =
-      static_cast<uintptr_t>(requested_bytes >> kObjectAlignmentBits);
-  int tag_bits = kSpaceTagSize + kFailureTypeTagSize + kFailureTagSize;
-  if (((requested << tag_bits) >> tag_bits) != requested) {
-    // No room for entire requested size in the bits. Round down to
-    // maximally representable size.
-    requested = static_cast<intptr_t>(
-                    (~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
-  }
-  int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
-  return Construct(RETRY_AFTER_GC, value);
+Failure* Failure::RetryAfterGC() {
+  return RetryAfterGC(NEW_SPACE);
+}
+
+
+Failure* Failure::RetryAfterGC(AllocationSpace space) {
+  ASSERT((space & ~kSpaceTagMask) == 0);
+  return Construct(RETRY_AFTER_GC, space);
 }
 
 
@@ -1485,6 +1470,15 @@
 }
 
 
+void FixedArray::set_unchecked(int index,
+                               Object* value,
+                               WriteBarrierMode mode) {
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(this, offset, value);
+  CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+}
+
+
 void FixedArray::set_null_unchecked(int index) {
   ASSERT(index >= 0 && index < this->length());
   ASSERT(!Heap::InNewSpace(Heap::null_value()));
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 90f7ce0..ed76cb9 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -50,6 +50,7 @@
     kVisitShortcutCandidate,
     kVisitByteArray,
     kVisitFixedArray,
+    kVisitGlobalContext,
 
     // For data objects, JS objects and structs along with generic visitor which
     // can visit object of any size we provide visitors specialized by
@@ -263,6 +264,11 @@
                                          FixedArray::BodyDescriptor,
                                          int>::Visit);
 
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      Context::ScavengeBodyDescriptor,
+                                      int>::Visit);
+
     table_.Register(kVisitByteArray, &VisitByteArray);
 
     table_.Register(kVisitSharedFunctionInfo,
diff --git a/src/objects.cc b/src/objects.cc
index 59ed1de..ac20b2e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -574,28 +574,6 @@
 }
 
 
-Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
-  ASSERT((space & ~kSpaceTagMask) == 0);
-  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
-  // happen to be identical at the moment.
-
-  int requested = requested_bytes >> kObjectAlignmentBits;
-  int value = (requested << kSpaceTagSize) | space;
-  // We can't very well allocate a heap number in this situation, and if the
-  // requested memory is so large it seems reasonable to say that this is an
-  // out of memory situation.  This fixes a crash in
-  // js1_5/Regress/regress-303213.js.
-  if (value >> kSpaceTagSize != requested ||
-      !Smi::IsValid(value) ||
-      value != ((value << kFailureTypeTagSize) >> kFailureTypeTagSize) ||
-      !Smi::IsValid(value << kFailureTypeTagSize)) {
-    Top::context()->mark_out_of_memory();
-    return Failure::OutOfMemoryException();
-  }
-  return Construct(RETRY_AFTER_GC, value);
-}
-
-
 // Should a word be prefixed by 'a' or 'an' in order to read naturally in
 // English?  Returns false for non-ASCII or words that don't start with
 // a capital letter.  The a/an rule follows pronunciation in English.
@@ -8591,7 +8569,9 @@
   details = PropertyDetails(details.attributes(),
                             details.type(),
                             DetailsAt(entry).index());
-  SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details);
+  Object* object_key = NumberDictionaryShape::AsObject(key);
+  if (object_key->IsFailure()) return object_key;
+  SetEntry(entry, object_key, value, details);
   return this;
 }
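
The block deleted at the top of this file is the likely source of the
spurious out-of-memory exceptions mentioned in the changelog: when a
retry-after-GC failure could not squeeze the requested size into the
remaining tag bits, it was converted into a hard OutOfMemoryException even
though a collection might have freed enough space.  With the size payload
gone, that path no longer exists:

  // Old (deleted): Failure::RetryAfterGC(huge_size, space)
  //                  -> mark_out_of_memory() -> spurious OOM exception.
  // New:           Failure::RetryAfterGC(space)
  //                  -> Heap::CollectGarbage(space), then retry allocation.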
 
diff --git a/src/objects.h b/src/objects.h
index e284454..d917a57 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -794,7 +794,7 @@
 //
 // Failures are a single word, encoded as follows:
 // +-------------------------+---+--+--+
-// |...rrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// |.........unused..........|sss|tt|11|
 // +-------------------------+---+--+--+
 //                          7 6 4 32 10
 //
@@ -810,11 +810,6 @@
 // allocation space tag is 000 for all failure types except
 // RETRY_AFTER_GC.  For RETRY_AFTER_GC, the possible values are the
 // allocation spaces (the encoding is found in globals.h).
-//
-// The remaining bits is the size of the allocation request in units
-// of the pointer size, and is zeroed except for RETRY_AFTER_GC
-// failures.  The 25 bits (on a 32 bit platform) gives a representable
-// range of 2^27 bytes (128MB).
 
 // Failure type tag info.
 const int kFailureTypeTagSize = 2;
@@ -836,15 +831,11 @@
   // Returns the space that needs to be collected for RetryAfterGC failures.
   inline AllocationSpace allocation_space() const;
 
-  // Returns the number of bytes requested (up to the representable maximum)
-  // for RetryAfterGC failures.
-  inline int requested() const;
-
   inline bool IsInternalError() const;
   inline bool IsOutOfMemoryException() const;
 
-  static Failure* RetryAfterGC(int requested_bytes, AllocationSpace space);
-  static inline Failure* RetryAfterGC(int requested_bytes);  // NEW_SPACE
+  static inline Failure* RetryAfterGC(AllocationSpace space);
+  static inline Failure* RetryAfterGC();  // NEW_SPACE
   static inline Failure* Exception();
   static inline Failure* InternalError();
   static inline Failure* OutOfMemoryException();
@@ -1760,6 +1751,7 @@
   // Setters with less debug checks for the GC to use.
   inline void set_unchecked(int index, Smi* value);
   inline void set_null_unchecked(int index);
+  inline void set_unchecked(int index, Object* value, WriteBarrierMode mode);
 
   // Gives access to raw memory which stores the array's data.
   inline Object** data_start();
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index ae44944..1003de1 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -291,6 +291,10 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   int frames_size = frames.length();
   ScopedVector<void*> addresses(frames_size);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index f7d8609..c01c0d2 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -397,6 +397,30 @@
 }
 
 
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
+void OS::SignalCodeMovingGC() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(kGCFakeMmap, "w+");
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  ASSERT(addr != MAP_FAILED);
+  munmap(addr, size);
+  fclose(f);
+#endif
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   // backtrace is a glibc extension.
 #ifdef __GLIBC__
@@ -748,6 +772,7 @@
   USE(info);
   if (signal != SIGPROF) return;
   if (active_sampler_ == NULL) return;
+  if (!IsVmThread()) return;
 
   TickSample sample_obj;
   TickSample* sample = CpuProfiler::TickSampleEvent();
@@ -755,6 +780,7 @@
 
   // We always sample the VM state.
   sample->state = VMState::current_state();
+
   // If profiling, we extract the current pc and sp.
   if (active_sampler_->IsProfiling()) {
     // Extracting the sample from the context is extremely machine dependent.
@@ -783,9 +809,7 @@
     // Implement this on MIPS.
     UNIMPLEMENTED();
 #endif
-    if (IsVmThread()) {
-      active_sampler_->SampleStack(sample);
-    }
+    active_sampler_->SampleStack(sample);
   }
 
   active_sampler_->Tick(sample);
@@ -806,7 +830,10 @@
 
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData();
 }
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 47193de..3e4daf3 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -245,6 +245,10 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   // MacOSX requires all these to install so we can assume they are present.
   // These constants are defined by the CPUid instructions.
@@ -549,17 +553,24 @@
 
   // Sampler thread handler.
   void Runner() {
-    // Loop until the sampler is disengaged, keeping the specified samling freq.
+    // Loop until the sampler is disengaged, keeping the specified
+    // sampling frequency.
     for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
+      // If the sampler runs in sync with the JS thread, we try to
+      // suspend it. If we fail, we skip the current sample.
+      if (sampler_->IsSynchronous()) {
+        if (KERN_SUCCESS != thread_suspend(profiled_thread_)) continue;
+      }
+
       // We always sample the VM state.
       sample->state = VMState::current_state();
+
       // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()
-          && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
+      if (sampler_->IsProfiling()) {
 #if V8_HOST_ARCH_X64
         thread_state_flavor_t flavor = x86_THREAD_STATE64;
         x86_thread_state64_t state;
@@ -591,11 +602,14 @@
           sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
           sampler_->SampleStack(sample);
         }
-        thread_resume(profiled_thread_);
       }
 
       // Invoke tick handler with program counter and stack pointer.
       sampler_->Tick(sample);
+
+      // If the sampler runs in sync with the JS thread, we have to
+      // remember to resume it.
+      if (sampler_->IsSynchronous()) thread_resume(profiled_thread_);
     }
   }
 };
@@ -613,7 +627,10 @@
 
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData(this);
 }
 
@@ -624,9 +641,9 @@
 
 
 void Sampler::Start() {
-  // If we are profiling, we need to be able to access the calling
-  // thread.
-  if (IsProfiling()) {
+  // If we are starting a synchronous sampler, we need to be able to
+  // access the calling thread.
+  if (IsSynchronous()) {
     data_->profiled_thread_ = mach_thread_self();
   }
 
@@ -655,7 +672,7 @@
   pthread_join(data_->sampler_thread_, NULL);
 
   // Deallocate Mach port for thread.
-  if (IsProfiling()) {
+  if (IsSynchronous()) {
     mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
   }
 }
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index b8392e8..b5caa5e 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -240,6 +240,11 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+  UNIMPLEMENTED();
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   UNIMPLEMENTED();
   return 0;
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 05ed9ee..e03059a 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -289,6 +289,10 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   UNIMPLEMENTED();
   return 1;
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 6d97ed7..fcd69de 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -256,6 +256,10 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 struct StackWalker {
   Vector<OS::StackFrame>& frames;
   int index;
@@ -598,7 +602,10 @@
 
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData();
 }
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 86314a8..caea16c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -845,14 +845,15 @@
                    bool is_executable) {
   // The address range used to randomize RWX allocations in OS::Allocate
   // Try not to map pages into the default range that windows loads DLLs
+  // Use a multiple of 64k to prevent committing unused memory.
   // Note: This does not guarantee RWX regions will be within the
   // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
 #ifdef V8_HOST_ARCH_64_BIT
   static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x000004FFFFFFFFFF;
+  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
 #else
   static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x4FFFFFFF;
+  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
 #endif
 
   // VirtualAlloc rounds allocated size to page size automatically.
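
The new maxima end in 0x...0000 because Windows reserves virtual memory at 64 KB allocation granularity: a base hint with nonzero low 16 bits strands the remainder of the granule. A hedged one-liner for the underlying rounding (illustration only, not code from this patch):

    #include <cstdint>

    // Round an address down to Windows' 64 KB allocation granularity.
    const uintptr_t kGranularity64K = 64 * 1024;
    uintptr_t RoundDownTo64K(uintptr_t address) {
      return address & ~(kGranularity64K - 1);
    }
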
@@ -1217,6 +1218,10 @@
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 // Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
 
 // Switch off warning 4748 (/GS can not protect parameters and local variables
@@ -1838,17 +1843,25 @@
     // Context used for sampling the register state of the profiled thread.
     CONTEXT context;
     memset(&context, 0, sizeof(context));
-    // Loop until the sampler is disengaged, keeping the specified samling freq.
+    // Loop until the sampler is disengaged, keeping the specified
+    // sampling frequency.
     for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
+      // If the sampler runs in sync with the JS thread, we try to
+      // suspend it. If we fail, we skip the current sample.
+      if (sampler_->IsSynchronous()) {
+        static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+        if (SuspendThread(profiled_thread_) == kSuspendFailed) continue;
+      }
+
       // We always sample the VM state.
       sample->state = VMState::current_state();
+
       // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()
-          && SuspendThread(profiled_thread_) != (DWORD)-1) {
+      if (sampler_->IsProfiling()) {
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
@@ -1862,11 +1875,14 @@
 #endif
           sampler_->SampleStack(sample);
         }
-        ResumeThread(profiled_thread_);
       }
 
       // Invoke tick handler with program counter and stack pointer.
       sampler_->Tick(sample);
+
+      // If the sampler runs in sync with the JS thread, we have to
+      // remember to resume it.
+      if (sampler_->IsSynchronous()) ResumeThread(profiled_thread_);
     }
   }
 };
@@ -1883,7 +1899,10 @@
 
 // Initialize a profile sampler.
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData(this);
 }
 
@@ -1895,9 +1914,9 @@
 
 // Start profiling.
 void Sampler::Start() {
-  // If we are profiling, we need to be able to access the calling
-  // thread.
-  if (IsProfiling()) {
+  // If we are starting a synchronous sampler, we need to be able to
+  // access the calling thread.
+  if (IsSynchronous()) {
     // Get a handle to the calling thread. This is the thread that we are
     // going to profile. We need to make a copy of the handle because we are
     // going to use it in the sampler thread. Using GetThreadHandle() will
diff --git a/src/platform.h b/src/platform.h
index e9e7c22..42e6eae 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -257,11 +257,16 @@
   static char* StrChr(char* str, int c);
   static void StrNCpy(Vector<char> dest, const char* src, size_t n);
 
-  // Support for profiler.  Can do nothing, in which case ticks
-  // occuring in shared libraries will not be properly accounted
-  // for.
+  // Support for the profiler.  Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted for.
   static void LogSharedLibraryAddresses();
 
+  // Support for the profiler.  Notifies the external profiling
+  // process that a code-moving garbage collection is starting.  Can do
+  // nothing, in which case the code objects must not move (e.g., by
+  // using --never-compact) if accurate profiling is desired.
+  static void SignalCodeMovingGC();
+
   // The return value indicates the CPU features we are sure of because of the
   // OS.  For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
   // instructions.
@@ -563,17 +568,24 @@
   void Start();
   void Stop();
 
-  // Is the sampler used for profiling.
-  inline bool IsProfiling() { return profiling_; }
+  // Is the sampler used for profiling?
+  bool IsProfiling() const { return profiling_; }
+
+  // Is the sampler running in sync with the JS thread? On platforms
+  // where the sampler is implemented with a thread that wakes up
+  // every now and then, having a synchronous sampler implies
+  // suspending/resuming the JS thread.
+  bool IsSynchronous() const { return synchronous_; }
 
   // Whether the sampler is running (that is, consumes resources).
-  inline bool IsActive() { return active_; }
+  bool IsActive() const { return active_; }
 
   class PlatformData;
 
  private:
   const int interval_;
   const bool profiling_;
+  const bool synchronous_;
   bool active_;
   PlatformData* data_;  // Platform specific data.
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
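
The macOS and Win32 sampler threads now share one loop shape around this interface. The sketch below paraphrases it with invented helper names (SuspendJsThread, CaptureRegisters, ResumeJsThread, SleepMs stand in for thread_suspend/GetThreadContext and friends), so it is illustrative only:

    for (; sampler->IsActive(); SleepMs(sampler->interval())) {
      // A synchronous sampler must suspend the JS thread before touching
      // its registers; when the suspend fails, this tick is skipped.
      if (sampler->IsSynchronous() && !SuspendJsThread()) continue;
      TickSample sample;
      sample.state = VMState::current_state();  // the VM state is always read
      if (sampler->IsProfiling()) CaptureRegisters(&sample);  // pc/sp/fp
      sampler->Tick(&sample);
      // Resume unconditionally if we suspended -- even when not profiling.
      if (sampler->IsSynchronous()) ResumeJsThread();
    }
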
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index 90abe91..6fbb14a 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -145,6 +145,12 @@
 }
 
 
+void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
+  ASSERT(is_uint24(by));
+  Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
+}
+
+
 void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
   ASSERT(register_index >= 0);
   ASSERT(register_index <= kMaxRegister);
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 3ddbc2f..6c9c2eb 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -65,6 +65,7 @@
   virtual void PushRegister(int register_index,
                             StackCheckFlag check_stack_limit);
   virtual void AdvanceRegister(int reg, int by);  // r[reg] += by.
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 41c674b..463c1a8 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -136,6 +136,12 @@
 }
 
 
+void RegExpMacroAssemblerTracer::SetCurrentPositionFromEnd(int by) {
+  PrintF(" SetCurrentPositionFromEnd(by=%d);\n", by);
+  assembler_->SetCurrentPositionFromEnd(by);
+}
+
+
 void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
   PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
   assembler_->SetRegister(register_index, to);
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 9608f9e..6a8f4d4 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -89,6 +89,7 @@
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 652b690..dc3bd82 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -155,6 +155,7 @@
                             StackCheckFlag check_stack_limit) = 0;
   virtual void ReadCurrentPositionFromRegister(int reg) = 0;
   virtual void ReadStackPointerFromRegister(int reg) = 0;
+  virtual void SetCurrentPositionFromEnd(int by) = 0;
   virtual void SetRegister(int register_index, int to) = 0;
   virtual void Succeed() = 0;
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;
diff --git a/src/runtime.cc b/src/runtime.cc
index c80f1fc..9a604a0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -6703,7 +6703,7 @@
 
 
 static Object* Runtime_StackGuard(Arguments args) {
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 0);
 
   // First check if this is a real stack overflow.
   if (StackGuard::IsStackOverflow()) {
@@ -10153,7 +10153,7 @@
   if (failure->IsRetryAfterGC()) {
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
-    Heap::CollectGarbage(failure->requested(), failure->allocation_space());
+    Heap::CollectGarbage(failure->allocation_space());
   } else {
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
diff --git a/src/runtime.h b/src/runtime.h
index 19f4144..2cd95c4 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -267,7 +267,7 @@
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
-  F(StackGuard, 1, 1) \
+  F(StackGuard, 0, 1) \
   F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \
diff --git a/src/serialize.cc b/src/serialize.cc
index cde7577..ccba737 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -500,7 +500,7 @@
 
 
 ExternalReferenceDecoder::ExternalReferenceDecoder()
-  : encodings_(NewArray<Address*>(kTypeCodeCount)) {
+    : encodings_(NewArray<Address*>(kTypeCodeCount)) {
   ExternalReferenceTable* external_references =
       ExternalReferenceTable::instance();
   for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
@@ -619,6 +619,8 @@
   external_reference_decoder_ = new ExternalReferenceDecoder();
   Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
   Heap::IterateWeakRoots(this, VISIT_ALL);
+
+  Heap::set_global_contexts_list(Heap::undefined_value());
 }
 
 
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index fbb2673..8a0dd07 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -407,8 +407,7 @@
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  ASSERT(p->is_valid());
-
+  if (!p->is_valid()) return false;
   return MemoryAllocator::IsPageInSpace(p, this);
 }
 
@@ -440,7 +439,7 @@
   object = SlowAllocateRaw(size_in_bytes);
   if (object != NULL) return object;
 
-  return Failure::RetryAfterGC(size_in_bytes, identity());
+  return Failure::RetryAfterGC(identity());
 }
 
 
@@ -454,7 +453,7 @@
   object = SlowMCAllocateRaw(size_in_bytes);
   if (object != NULL) return object;
 
-  return Failure::RetryAfterGC(size_in_bytes, identity());
+  return Failure::RetryAfterGC(identity());
 }
 
 
@@ -475,7 +474,7 @@
 Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                       AllocationInfo* alloc_info) {
   Address new_top = alloc_info->top + size_in_bytes;
-  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
+  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
 
   Object* obj = HeapObject::FromAddress(alloc_info->top);
   alloc_info->top = new_top;
diff --git a/src/spaces.cc b/src/spaces.cc
index d824c30..5bdbcc7 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1828,7 +1828,7 @@
   if (cur == kEnd) {
     // No large enough size in list.
     *wasted_bytes = 0;
-    return Failure::RetryAfterGC(size_in_bytes, owner_);
+    return Failure::RetryAfterGC(owner_);
   }
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   int rem = cur - index;
@@ -1926,7 +1926,7 @@
 
 Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(object_size_, owner_);
+    return Failure::RetryAfterGC(owner_);
   }
 
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
@@ -2753,14 +2753,14 @@
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
-    return Failure::RetryAfterGC(requested_size, identity());
+    return Failure::RetryAfterGC(identity());
   }
 
   size_t chunk_size;
   LargeObjectChunk* chunk =
       LargeObjectChunk::New(requested_size, &chunk_size, executable);
   if (chunk == NULL) {
-    return Failure::RetryAfterGC(requested_size, identity());
+    return Failure::RetryAfterGC(identity());
   }
 
   size_ += static_cast<int>(chunk_size);
diff --git a/src/spaces.h b/src/spaces.h
index 2fdb96f..0e6a91e 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -2194,7 +2194,6 @@
   // if such a page doesn't exist.
   LargeObjectChunk* FindChunkContainingPc(Address pc);
 
-
   // Iterates objects covered by dirty regions.
   void IterateDirtyRegions(ObjectSlotCallback func);
 
diff --git a/src/strtod.cc b/src/strtod.cc
index 68444fc..ae278bd 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -85,12 +85,22 @@
 extern "C" double gay_strtod(const char* s00, const char** se);
 
 static double old_strtod(Vector<const char> buffer, int exponent) {
+  // gay_strtod is broken on Linux/x86. For numbers with few decimal digits,
+  // the computation is done using floating-point operations which (on Linux)
+  // are prone to double-rounding errors.
+  // By adding several zeroes to the buffer, gay_strtod falls back to a
+  // slower (but correct) algorithm.
+  const int kInsertedZeroesCount = 20;
   char gay_buffer[1024];
   Vector<char> gay_buffer_vector(gay_buffer, sizeof(gay_buffer));
   int pos = 0;
   for (int i = 0; i < buffer.length(); ++i) {
     gay_buffer_vector[pos++] = buffer[i];
   }
+  for (int i = 0; i < kInsertedZeroesCount; ++i) {
+    gay_buffer_vector[pos++] = '0';
+  }
+  exponent -= kInsertedZeroesCount;
   gay_buffer_vector[pos++] = 'e';
   if (exponent < 0) {
     gay_buffer_vector[pos++] = '-';
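
A worked instance of the rewrite, using the value added to test-strtod.cc later in this patch:

    // old_strtod("89255", -22) now hands gay_strtod the string
    //   "89255" + 20 * '0' + "e-42"  ==  "8925500000000000000000e-42",
    // the same value as 89255e-22, but with enough digits that gay_strtod
    // abandons its floating-point fast path for the slower, correctly
    // rounded big-number algorithm.
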
@@ -139,13 +149,18 @@
 }
 
 
-double Strtod(Vector<const char> buffer, int exponent) {
-  Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
-  Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
-  exponent += left_trimmed.length() - trimmed.length();
-  if (trimmed.length() == 0) return 0.0;
-  if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
-  if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+static bool DoubleStrtod(Vector<const char> trimmed,
+                         int exponent,
+                         double* result) {
+#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
+  // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+  // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+  // result is not accurate.
+  // We know that Win32 uses 64 bits and is therefore accurate.
+  // Note that the ARM simulator is compiled for 32 bits. It therefore
+  // exhibits the same problem.
+  return false;
+#endif
   if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
     // The trimmed input fits into a double.
     // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
@@ -155,13 +170,15 @@
     // return the best possible approximation.
     if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
       // 10^-exponent fits into a double.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      return buffer_d / exact_powers_of_ten[-exponent];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result /= exact_powers_of_ten[-exponent];
+      return true;
     }
     if (0 <= exponent && exponent < kExactPowersOfTenSize) {
       // 10^exponent fits into a double.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      return buffer_d * exact_powers_of_ten[exponent];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result *= exact_powers_of_ten[exponent];
+      return true;
     }
     int remaining_digits =
         kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
@@ -170,11 +187,27 @@
       // The trimmed string was short and we can multiply it with
       // 10^remaining_digits. As a result the remaining exponent now fits
       // into a double too.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      buffer_d *= exact_powers_of_ten[remaining_digits];
-      return buffer_d * exact_powers_of_ten[exponent - remaining_digits];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result *= exact_powers_of_ten[remaining_digits];
+      *result *= exact_powers_of_ten[exponent - remaining_digits];
+      return true;
     }
   }
+  return false;
+}
+
+
+double Strtod(Vector<const char> buffer, int exponent) {
+  Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+  Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
+  exponent += left_trimmed.length() - trimmed.length();
+  if (trimmed.length() == 0) return 0.0;
+  if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
+  if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+  double result;
+  if (DoubleStrtod(trimmed, exponent, &result)) {
+    return result;
+  }
   return old_strtod(trimmed, exponent);
 }
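
The fast path in DoubleStrtod rests on one fact: when both the digit string and the power of ten are exactly representable as doubles, a single IEEE multiplication or division rounds exactly once, so the result is the correctly rounded answer. A small self-contained check (assumes an SSE2/64-bit build; on a bare x87 FPU the division case is exactly the double-rounding hazard the #if above guards against):

    #include <cassert>

    int main() {
      double mantissa = 1234.0;  // up to 15 digits: < 2^53, hence exact
      double power = 1e5;        // 10^0 .. 10^22 are all exact doubles
      assert(mantissa * power == 123400000.0);  // one rounding step: exact
      assert(mantissa / power == 0.01234);      // division also rounds once
      return 0;
    }
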
 
diff --git a/src/top.cc b/src/top.cc
index 777f041..9ce6542 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -69,6 +69,9 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   js_entry_sp_ = 0;
 #endif
+#ifdef ENABLE_VMSTATE_TRACKING
+  current_vm_state_ = NULL;
+#endif
   try_catch_handler_address_ = NULL;
   context_ = NULL;
   int id = ThreadManager::CurrentId();
diff --git a/src/top.h b/src/top.h
index 776c43e..a2ba3dd 100644
--- a/src/top.h
+++ b/src/top.h
@@ -41,6 +41,7 @@
 
 class SaveContext;  // Forward declaration.
 class ThreadVisitor;  // Defined in v8threads.h
+class VMState;  // Defined in vm-state.h
 
 class ThreadLocalTop BASE_EMBEDDED {
  public:
@@ -101,10 +102,15 @@
   // Stack.
   Address c_entry_fp_;  // the frame pointer of the top c entry frame
   Address handler_;   // try-blocks are chained through the stack
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
 #endif
 
+#ifdef ENABLE_VMSTATE_TRACKING
+  VMState* current_vm_state_;
+#endif
+
   // Generated code scratch locations.
   int32_t formal_count_;
 
@@ -254,6 +260,16 @@
   }
 #endif
 
+#ifdef ENABLE_VMSTATE_TRACKING
+  static VMState* current_vm_state() {
+    return thread_local_.current_vm_state_;
+  }
+
+  static void set_current_vm_state(VMState* state) {
+    thread_local_.current_vm_state_ = state;
+  }
+#endif
+
   // Generated code scratch locations.
   static void* formal_count_address() { return &thread_local_.formal_count_; }
 
diff --git a/src/version.cc b/src/version.cc
index 33b4977..6d98b04 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     5
-#define BUILD_NUMBER      0
+#define BUILD_NUMBER      1
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index aa4cedb..74f4a6a 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -75,9 +75,9 @@
 #endif
   state_ = state;
   // Save the previous state.
-  previous_ = reinterpret_cast<VMState*>(current_state_);
+  previous_ = Top::current_vm_state();
   // Install the new state.
-  OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(this));
+  Top::set_current_vm_state(this);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
@@ -106,7 +106,7 @@
 VMState::~VMState() {
   if (disabled_) return;
   // Return to the previous state.
-  OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(previous_));
+  Top::set_current_vm_state(previous_);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
diff --git a/src/vm-state.cc b/src/vm-state.cc
deleted file mode 100644
index 6bd737d..0000000
--- a/src/vm-state.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "vm-state.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_VMSTATE_TRACKING
-AtomicWord VMState::current_state_ = 0;
-#endif
-
-} }  // namespace v8::internal
diff --git a/src/vm-state.h b/src/vm-state.h
index 080eb8d..cc91e83 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -28,6 +28,8 @@
 #ifndef V8_VM_STATE_H_
 #define V8_VM_STATE_H_
 
+#include "top.h"
+
 namespace v8 {
 namespace internal {
 
@@ -44,16 +46,16 @@
 
   // Used for debug asserts.
   static bool is_outermost_external() {
-    return current_state_ == 0;
+    return Top::current_vm_state() == 0;
   }
 
   static StateTag current_state() {
-    VMState* state = reinterpret_cast<VMState*>(current_state_);
+    VMState* state = Top::current_vm_state();
     return state ? state->state() : EXTERNAL;
   }
 
   static Address external_callback() {
-    VMState* state = reinterpret_cast<VMState*>(current_state_);
+    VMState* state = Top::current_vm_state();
     return state ? state->external_callback_ : NULL;
   }
 
@@ -63,8 +65,6 @@
   VMState* previous_;
   Address external_callback_;
 
-  // A stack of VM states.
-  static AtomicWord current_state_;
 #else
  public:
   explicit VMState(StateTag state) {}
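
Taken together, the vm-state changes turn the VM-state stack into per-thread data: the constructor pushes `this` onto a list reachable through Top's ThreadLocalTop, the destructor pops it, and the old global AtomicWord with its release stores disappears. A condensed paraphrase of the inline definitions, not a drop-in:

    // VMState as a scoped, per-thread linked stack (simplified).
    VMState::VMState(StateTag state)
        : state_(state), previous_(Top::current_vm_state()) {
      Top::set_current_vm_state(this);       // push onto this thread's stack
    }

    VMState::~VMState() {
      Top::set_current_vm_state(previous_);  // pop back to the saved state
    }
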
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d592037..2d87667 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2123,7 +2123,7 @@
     __ JumpIfNotBothSmi(rax, rdx, &non_smi);
     __ subq(rdx, rax);
     __ j(no_overflow, &smi_done);
-    __ neg(rdx);  // Correct sign in case of overflow.
+    __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
     __ bind(&smi_done);
     __ movq(rax, rdx);
     __ ret(0);
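
Why `not` rather than `neg`: on overflow the wrapped difference can be exactly INT64_MIN, which two's-complement negation maps to itself (still negative, i.e. still the wrong sign), while `not` always flips the sign; and since x64 smis keep their payload in the upper 32 bits, `~x` can never be 0 (that would require x == -1). A sketch of the corner case, with C++ standing in for the assembly (the unsigned casts assume two's complement):

    #include <cassert>
    #include <cstdint>

    int main() {
      // x64 smis carry their 32-bit payload in the upper word.
      int64_t lhs = int64_t{INT32_MAX} << 32;   // smi 2147483647
      int64_t rhs = -(int64_t{1} << 32);        // smi -1
      // The true difference is 2^31 << 32 == 2^63: it overflows int64
      // and wraps to INT64_MIN, so the computed sign comes out wrong.
      int64_t wrapped = static_cast<int64_t>(
          static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs));
      assert(wrapped == INT64_MIN);
      // neg leaves INT64_MIN unchanged: the sign would stay wrong...
      assert(static_cast<int64_t>(-static_cast<uint64_t>(wrapped)) == wrapped);
      // ...while not (~x == -x - 1) flips the sign, and cannot yield 0
      // here: that would need x == -1, impossible with 32 zero tag bits.
      assert(~wrapped == INT64_MAX);
      return 0;
    }
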
@@ -2394,16 +2394,7 @@
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Because builtins always remove the receiver from the stack, we
-  // have to fake one to avoid underflowing the stack. The receiver
-  // must be inserted below the return address on the stack so we
-  // temporarily store that in a register.
-  __ pop(rax);
-  __ Push(Smi::FromInt(0));
-  __ push(rax);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
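
With the dummy Smi argument gone, the declared arity has to agree in three places at once; condensed from this patch:

    F(StackGuard, 0, 1)                                // src/runtime.h
    __ TailCallRuntime(Runtime::kStackGuard, 0, 1);    // the code stubs
    ASSERT(args.length() == 0);                        // Runtime_StackGuard
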
 
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1bd3443..cb91067 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -101,9 +101,9 @@
   // dirty. |object| is the object being stored into, |value| is the
   // object being stored. If |offset| is zero, then the |scratch|
   // register contains the array index into the elements array
-  // represented as a Smi. All registers are clobbered by the
-  // operation. RecordWrite filters out smis so it does not update the
-  // write barrier if the value is a smi.
+  // represented as an untagged 32-bit integer. All registers are
+  // clobbered by the operation. RecordWrite filters out smis so it
+  // does not update the write barrier if the value is a smi.
   void RecordWrite(Register object,
                    int offset,
                    Register value,
@@ -122,7 +122,7 @@
   // The value is known to not be a smi.
   // object is the object being stored into, value is the object being stored.
   // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as a Smi.
+  // the elements array represented as an untagged 32-bit integer.
   // All registers are clobbered by the operation.
   void RecordWriteNonSmi(Register object,
                          int offset,
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 91e2b44..47c19c7 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -145,7 +145,6 @@
 
 void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ addq(rdi, Immediate(by * char_size()));
   }
 }
@@ -1053,6 +1052,19 @@
 }
 
 
+void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
+  NearLabel after_position;
+  __ cmpq(rdi, Immediate(-by * char_size()));
+  __ j(greater_equal, &after_position);
+  __ movq(rdi, Immediate(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+
 void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
   __ movq(register_location(register_index), Immediate(to));
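
The register convention makes the new operation terse: rdi holds the current position as a negative byte offset from the end of the subject string, so clamping it to -by * char_size() means "jump to at most `by` characters before the end". In plain C++ terms (a sketch of the semantics, not the generated code):

    #include <cstdint>

    // Model rdi as a negative byte offset from the end of the subject.
    void SetCurrentPositionFromEnd(int by, int char_size, int64_t* rdi) {
      if (*rdi < -int64_t{by} * char_size) {
        // More than `by` characters remain: skip the prefix in which an
        // end-anchored match of length <= by can never start.
        *rdi = -int64_t{by} * char_size;
        // The generated code then reloads the character at offset -1,
        // which the RegExp entry code expects to be cached.
      }
    }
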
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 3bcc3ac..182bc55 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -93,6 +93,7 @@
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 68b757a..2936d6e 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -37,7 +37,7 @@
 
 static Object* AllocateAfterFailures() {
   static int attempts = 0;
-  if (++attempts < 3) return Failure::RetryAfterGC(0);
+  if (++attempts < 3) return Failure::RetryAfterGC();
 
   // New space.
   NewSpace* new_space = Heap::new_space();
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index af50d3d..fd44aec 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -431,8 +431,8 @@
     LocalContext env;
     Local<String> source = String::New(two_byte_source);
     // Trigger GCs so that the newly allocated string moves to old gen.
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
     bool success = source->MakeExternal(new TestResource(two_byte_source));
     CHECK(success);
     Local<Script> script = Script::Compile(source);
@@ -456,8 +456,8 @@
     LocalContext env;
     Local<String> source = v8_str(c_source);
     // Trigger GCs so that the newly allocated string moves to old gen.
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
     bool success = source->MakeExternal(
         new TestAsciiResource(i::StrDup(c_source)));
     CHECK(success);
@@ -479,8 +479,8 @@
   LocalContext env;
 
   // Free some space in the new space so that we can check freshness.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
+  i::Heap::CollectGarbage(i::NEW_SPACE);
+  i::Heap::CollectGarbage(i::NEW_SPACE);
 
   uint16_t* two_byte_string = AsciiToTwoByteString("small");
   Local<String> small_string = String::New(two_byte_string);
@@ -489,8 +489,8 @@
   // We should refuse to externalize newly created small string.
   CHECK(!small_string->CanMakeExternal());
   // Trigger GCs so that the newly allocated string moves to old gen.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+  i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+  i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
@@ -525,15 +525,15 @@
   LocalContext env;
 
   // Free some space in the new space so that we can check freshness.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
+  i::Heap::CollectGarbage(i::NEW_SPACE);
+  i::Heap::CollectGarbage(i::NEW_SPACE);
 
   Local<String> small_string = String::New("small");
   // We should refuse to externalize newly created small string.
   CHECK(!small_string->CanMakeExternal());
   // Trigger GCs so that the newly allocated string moves to old gen.
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+  i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+  i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
   // Old space strings should be accepted.
   CHECK(small_string->CanMakeExternal());
 
@@ -565,8 +565,8 @@
         String::NewExternal(new TestResource(two_byte_string));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     // Trigger GCs so that the newly allocated string moves to old gen.
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
     i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
@@ -583,8 +583,8 @@
         new TestAsciiResource(i::StrDup(one_byte_string)));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     // Trigger GCs so that the newly allocated string moves to old gen.
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in survivor space now
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);  // in old gen now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in survivor space now
+    i::Heap::CollectGarbage(i::NEW_SPACE);  // in old gen now
     i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
@@ -602,12 +602,12 @@
     Local<String> string =
         String::NewExternal(new TestResource(two_byte_string));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);
+    i::Heap::CollectGarbage(i::NEW_SPACE);
     in_new_space = i::Heap::InNewSpace(*istring);
     CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
     CHECK_EQ(0, TestResource::dispose_count);
   }
-  i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+  i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
   CHECK_EQ(1, TestResource::dispose_count);
 }
 
@@ -621,12 +621,12 @@
     Local<String> string = String::NewExternal(
         new TestAsciiResource(i::StrDup(one_byte_string)));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
-    i::Heap::CollectGarbage(0, i::NEW_SPACE);
+    i::Heap::CollectGarbage(i::NEW_SPACE);
     in_new_space = i::Heap::InNewSpace(*istring);
     CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
     CHECK_EQ(0, TestAsciiResource::dispose_count);
   }
-  i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+  i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
   CHECK_EQ(1, TestAsciiResource::dispose_count);
 }
 
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index f5526ce..d59e2f5 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -867,7 +867,7 @@
     break_point_hit_count++;
     if (break_point_hit_count % 2 == 0) {
       // Scavenge.
-      Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
+      Heap::CollectGarbage(v8::internal::NEW_SPACE);
     } else {
       // Mark sweep compact.
       Heap::CollectAllGarbage(true);
@@ -891,7 +891,7 @@
 
     // Run the garbage collector to enforce heap verification if option
     // --verify-heap is set.
-    Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
+    Heap::CollectGarbage(v8::internal::NEW_SPACE);
 
     // Set the break flag again to come back here as soon as possible.
     v8::Debug::DebugBreak();
@@ -1322,7 +1322,7 @@
     CHECK_EQ(1 + i * 3, break_point_hit_count);
 
     // Scavenge and call function.
-    Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
+    Heap::CollectGarbage(v8::internal::NEW_SPACE);
     f->Call(recv, 0, NULL);
     CHECK_EQ(2 + i * 3, break_point_hit_count);
 
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 7587da8..88fa79b 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -130,7 +130,7 @@
   InitializeIfNeeded();
   // A retry after a GC may pollute the counts, so perform gc now
   // to avoid that.
-  v8::internal::Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
+  v8::internal::Heap::CollectGarbage(v8::internal::NEW_SPACE);
   HandleScope scope;
   TryCatch catcher;
   catcher.SetVerbose(true);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index eec024f..126ac21 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -177,13 +177,11 @@
   int request = 24;
   CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
   CHECK(Smi::FromInt(42)->IsSmi());
-  CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
-  CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
+  CHECK(Failure::RetryAfterGC(NEW_SPACE)->IsFailure());
   CHECK_EQ(NEW_SPACE,
-           Failure::RetryAfterGC(request, NEW_SPACE)->allocation_space());
+           Failure::RetryAfterGC(NEW_SPACE)->allocation_space());
   CHECK_EQ(OLD_POINTER_SPACE,
-           Failure::RetryAfterGC(request,
-                                 OLD_POINTER_SPACE)->allocation_space());
+           Failure::RetryAfterGC(OLD_POINTER_SPACE)->allocation_space());
   CHECK(Failure::Exception()->IsFailure());
   CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
   CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
@@ -195,8 +193,7 @@
 
   v8::HandleScope sc;
   // Check GC.
-  int free_bytes = Heap::MaxObjectSizeInPagedSpace();
-  CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
   Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
@@ -221,7 +218,7 @@
     CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
   }
 
-  CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   // Function should be alive.
   CHECK(Top::context()->global()->HasLocalProperty(*name));
@@ -239,7 +236,7 @@
   }
 
   // After gc, it should survive.
-  CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   CHECK(Top::context()->global()->HasLocalProperty(*obj_name));
   CHECK(Top::context()->global()->GetProperty(*obj_name)->IsJSObject());
@@ -301,7 +298,7 @@
   }
 
   // after gc, it should survive
-  CHECK(Heap::CollectGarbage(0, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   CHECK((*h1)->IsString());
   CHECK((*h2)->IsHeapNumber());
@@ -382,8 +379,8 @@
     h2 = GlobalHandles::Create(*u);
   }
 
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
-  CHECK(Heap::CollectGarbage(0, NEW_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
+  Heap::CollectGarbage(NEW_SPACE);
   // Make sure the object is promoted.
 
   GlobalHandles::MakeWeak(h2.location(),
@@ -392,7 +389,7 @@
   CHECK(!GlobalHandles::IsNearDeath(h1.location()));
   CHECK(!GlobalHandles::IsNearDeath(h2.location()));
 
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   CHECK((*h1)->IsString());
 
@@ -426,7 +423,7 @@
   CHECK(!WeakPointerCleared);
 
   // Mark-compact treats weak reference properly.
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   CHECK(WeakPointerCleared);
 }
@@ -814,8 +811,7 @@
 TEST(LargeObjectSpaceContains) {
   InitializeVM();
 
-  int free_bytes = Heap::MaxObjectSizeInPagedSpace();
-  CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   Address current_top = Heap::new_space()->top();
   Page* page = Page::FromAddress(current_top);
@@ -958,6 +954,7 @@
   CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
 }
 
+
 TEST(TestCodeFlushing) {
   i::FLAG_allow_natives_syntax = true;
   // If we do not flush code this test is invalid.
@@ -1001,3 +998,91 @@
   CHECK(function->shared()->is_compiled());
   CHECK(function->is_compiled());
 }
+
+
+// Count the number of global contexts in the weak list of global contexts.
+static int CountGlobalContexts() {
+  int count = 0;
+  Object* object = Heap::global_contexts_list();
+  while (!object->IsUndefined()) {
+    count++;
+    object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
+  }
+  return count;
+}
+
+
+TEST(TestInternalWeakLists) {
+  static const int kNumTestContexts = 10;
+
+  v8::HandleScope scope;
+  v8::Persistent<v8::Context> ctx[kNumTestContexts];
+
+  CHECK_EQ(0, CountGlobalContexts());
+
+  // Create a number of global contexts which get linked together.
+  for (int i = 0; i < kNumTestContexts; i++) {
+    ctx[i] = v8::Context::New();
+    CHECK_EQ(i + 1, CountGlobalContexts());
+
+    ctx[i]->Enter();
+    ctx[i]->Exit();
+  }
+
+  // Force compilation cache cleanup.
+  Heap::CollectAllGarbage(true);
+
+  // Dispose the global contexts one by one.
+  for (int i = 0; i < kNumTestContexts; i++) {
+    ctx[i].Dispose();
+    ctx[i].Clear();
+
+    // Scavenge treats these references as strong.
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(kNumTestContexts - i, CountGlobalContexts());
+    }
+
+    // Mark compact handles the weak references.
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
+  }
+
+  CHECK_EQ(0, CountGlobalContexts());
+}
+
+
+// Count the number of global contexts in the weak list of global contexts,
+// triggering a GC after visiting the specified number of elements.
+static int CountGlobalContextsWithGC(int n) {
+  int count = 0;
+  Handle<Object> object(Heap::global_contexts_list());
+  while (!object->IsUndefined()) {
+    count++;
+    if (count == n) Heap::CollectAllGarbage(true);
+    object =
+        Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
+  }
+  return count;
+}
+
+
+TEST(TestInternalWeakListsTraverseWithGC) {
+  static const int kNumTestContexts = 10;
+
+  v8::HandleScope scope;
+  v8::Persistent<v8::Context> ctx[kNumTestContexts];
+
+  CHECK_EQ(0, CountGlobalContexts());
+
+  // Create a number of contexts and check the length of the weak list both
+  // with and without GCs while iterating the list.
+  for (int i = 0; i < kNumTestContexts; i++) {
+    ctx[i] = v8::Context::New();
+    CHECK_EQ(i + 1, CountGlobalContexts());
+    CHECK_EQ(i + 1, CountGlobalContextsWithGC(i / 2 + 1));
+
+    ctx[i]->Enter();
+    ctx[i]->Exit();
+  }
+}
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index b364ae3..16d0f00 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -469,7 +469,7 @@
   CHECK(!sampler.WasSampleStackCalled());
   nonJsThread.WaitForRunning();
   nonJsThread.SendSigProf();
-  CHECK(sampler.WaitForTick());
+  CHECK(!sampler.WaitForTick());
   CHECK(!sampler.WasSampleStackCalled());
   sampler.Stop();
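
The flipped expectation follows from the platform-linux.cc handler change earlier in this patch: SIGPROF ticks raised on non-VM threads are now dropped before any tick is recorded, so a SIGPROF sent to the non-JS thread must time out rather than produce a tick. Condensed:

    // platform-linux.cc signal handler, condensed:
    if (!IsVmThread()) return;       // non-VM-thread ticks are dropped here,
    ...                              // so control never reaches
    active_sampler_->Tick(sample);   // the Tick the test waits for
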
 
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index e4ac1b7..531b1f7 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -94,7 +94,7 @@
   CHECK(Heap::InSpace(*array, NEW_SPACE));
 
   // Call the m-c collector, so array becomes an old object.
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // Array now sits in the old space
   CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE));
@@ -111,7 +111,7 @@
   v8::HandleScope sc;
 
   // Do a mark compact GC to shrink the heap.
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // Allocate a big Fixed array in the new space.
   int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
@@ -134,7 +134,7 @@
   }
 
   // Call mark compact GC, and it should pass.
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // array should not be promoted because the old space is full.
   CHECK(Heap::InSpace(*array, NEW_SPACE));
@@ -146,7 +146,7 @@
 
   v8::HandleScope sc;
   // call mark-compact when heap is empty
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // keep allocating garbage in new space until it fails
   const int ARRAY_SIZE = 100;
@@ -154,7 +154,7 @@
   do {
     array = Heap::AllocateFixedArray(ARRAY_SIZE);
   } while (!array->IsFailure());
-  CHECK(Heap::CollectGarbage(0, NEW_SPACE));
+  Heap::CollectGarbage(NEW_SPACE);
 
   array = Heap::AllocateFixedArray(ARRAY_SIZE);
   CHECK(!array->IsFailure());
@@ -164,7 +164,7 @@
   do {
     mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   } while (!mapp->IsFailure());
-  CHECK(Heap::CollectGarbage(0, MAP_SPACE));
+  Heap::CollectGarbage(MAP_SPACE);
   mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
   CHECK(!mapp->IsFailure());
 
@@ -182,7 +182,7 @@
   Top::context()->global()->SetProperty(func_name, function, NONE);
 
   JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
   CHECK(Top::context()->global()->HasLocalProperty(func_name));
@@ -196,7 +196,7 @@
   String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
   obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
 
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
   CHECK(Top::context()->global()->HasLocalProperty(obj_name));
@@ -264,7 +264,7 @@
   CHECK_EQ(0, gc_starts);
   CHECK_EQ(gc_ends, gc_starts);
 
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
   CHECK_EQ(1, gc_starts);
   CHECK_EQ(gc_ends, gc_starts);
 }
@@ -317,7 +317,7 @@
     GlobalHandles::AddGroup(g2_objects, 2);
   }
   // Do a full GC
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // All object should be alive.
   CHECK_EQ(0, NumberOfWeakCalls);
@@ -335,7 +335,7 @@
     GlobalHandles::AddGroup(g2_objects, 2);
   }
 
-  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
+  Heap::CollectGarbage(OLD_POINTER_SPACE);
 
   // All objects should be gone. 5 global handles in total.
   CHECK_EQ(5, NumberOfWeakCalls);
diff --git a/test/cctest/test-strtod.cc b/test/cctest/test-strtod.cc
index 6102db6..ae1c00d 100644
--- a/test/cctest/test-strtod.cc
+++ b/test/cctest/test-strtod.cc
@@ -198,4 +198,10 @@
   CHECK_EQ(1234e304, StrtodChar("0000000123400000", 299));
   CHECK_EQ(V8_INFINITY, StrtodChar("00000000180000000", 300));
   CHECK_EQ(17e307, StrtodChar("00000000170000000", 300));
+
+  // The following number is the result of 89255.0/1e22. Both floating-point
+  // numbers can be accurately represented with doubles. However, on Linux/x86
+  // the floating-point stack is set to 80 bits and the double rounding
+  // introduces an error.
+  CHECK_EQ(89255e-22, StrtodChar("89255", -22));
 }
diff --git a/test/mjsunit/int32-ops.js b/test/mjsunit/int32-ops.js
new file mode 100644
index 0000000..1883926
--- /dev/null
+++ b/test/mjsunit/int32-ops.js
@@ -0,0 +1,227 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Repeat most of the tests in smi-ops.js that use SMI_MIN and SMI_MAX, but
+// with SMI_MIN and SMI_MAX from the 64-bit platform, which represents all
+// signed 32-bit integer values as smis.
+
+const SMI_MAX = (1 << 30) - 1 + (1 << 30);  // Create without overflowing.
+const SMI_MIN = -SMI_MAX - 1;  // Create without overflowing.
+const ONE = 1;
+const ONE_HUNDRED = 100;
+
+const OBJ_42 = new (function() {
+  this.valueOf = function() { return 42; };
+})();
+
+assertEquals(42, OBJ_42.valueOf());
+
+
+function Add1(x) {
+  return x + 1;
+}
+
+function Add100(x) {
+  return x + 100;
+}
+
+function Add1Reversed(x) {
+  return 1 + x;
+}
+
+function Add100Reversed(x) {
+  return 100 + x;
+}
+
+
+assertEquals(1, Add1(0));  // fast case
+assertEquals(1, Add1Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE, Add1(SMI_MAX), "smimax + 1");
+assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX), "1 + smimax");
+assertEquals(42 + ONE, Add1(OBJ_42));  // non-smi
+assertEquals(42 + ONE, Add1Reversed(OBJ_42));  // non-smi
+
+assertEquals(100, Add100(0));  // fast case
+assertEquals(100, Add100Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX), "smimax + 100");
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX), " 100 + smimax");
+assertEquals(42 + ONE_HUNDRED, Add100(OBJ_42));  // non-smi
+assertEquals(42 + ONE_HUNDRED, Add100Reversed(OBJ_42));  // non-smi
+
+
+
+function Sub1(x) {
+  return x - 1;
+}
+
+function Sub100(x) {
+  return x - 100;
+}
+
+function Sub1Reversed(x) {
+  return 1 - x;
+}
+
+function Sub100Reversed(x) {
+  return 100 - x;
+}
+
+
+assertEquals(0, Sub1(1));  // fast case
+assertEquals(-1, Sub1Reversed(2));  // fast case
+assertEquals(SMI_MIN - ONE, Sub1(SMI_MIN));  // overflow
+assertEquals(ONE - SMI_MIN, Sub1Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE, Sub1(OBJ_42));  // non-smi
+assertEquals(ONE - 42, Sub1Reversed(OBJ_42));  // non-smi
+
+assertEquals(0, Sub100(100));  // fast case
+assertEquals(1, Sub100Reversed(99));  // fast case
+assertEquals(SMI_MIN - ONE_HUNDRED, Sub100(SMI_MIN));  // overflow
+assertEquals(ONE_HUNDRED - SMI_MIN, Sub100Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE_HUNDRED, Sub100(OBJ_42));  // non-smi
+assertEquals(ONE_HUNDRED - 42, Sub100Reversed(OBJ_42));  // non-smi
+
+
+function Shr1(x) {
+  return x >>> 1;
+}
+
+function Shr100(x) {
+  return x >>> 100;
+}
+
+function Shr1Reversed(x) {
+  return 1 >>> x;
+}
+
+function Shr100Reversed(x) {
+  return 100 >>> x;
+}
+
+function Sar1(x) {
+  return x >> 1;
+}
+
+function Sar100(x) {
+  return x >> 100;
+}
+
+function Sar1Reversed(x) {
+  return 1 >> x;
+}
+
+function Sar100Reversed(x) {
+  return 100 >> x;
+}
+
+
+assertEquals(0, Shr1(1));
+assertEquals(0, Sar1(1));
+assertEquals(0, Shr1Reversed(2));
+assertEquals(0, Sar1Reversed(2));
+assertEquals(1073741824, Shr1(SMI_MIN));
+assertEquals(-1073741824, Sar1(SMI_MIN));
+assertEquals(1, Shr1Reversed(SMI_MIN));
+assertEquals(1, Sar1Reversed(SMI_MIN));
+assertEquals(21, Shr1(OBJ_42));
+assertEquals(21, Sar1(OBJ_42));
+assertEquals(0, Shr1Reversed(OBJ_42));
+assertEquals(0, Sar1Reversed(OBJ_42));
+
+assertEquals(6, Shr100(100), "100 >>> 100");
+assertEquals(6, Sar100(100), "100 >> 100");
+assertEquals(12, Shr100Reversed(99));
+assertEquals(12, Sar100Reversed(99));
+assertEquals(134217728, Shr100(SMI_MIN));
+assertEquals(-134217728, Sar100(SMI_MIN));
+assertEquals(100, Shr100Reversed(SMI_MIN));
+assertEquals(100, Sar100Reversed(SMI_MIN));
+assertEquals(2, Shr100(OBJ_42));
+assertEquals(2, Sar100(OBJ_42));
+assertEquals(0, Shr100Reversed(OBJ_42));
+assertEquals(0, Sar100Reversed(OBJ_42));
+
+
+function Xor1(x) {
+  return x ^ 1;
+}
+
+function Xor100(x) {
+  return x ^ 100;
+}
+
+function Xor1Reversed(x) {
+  return 1 ^ x;
+}
+
+function Xor100Reversed(x) {
+  return 100 ^ x;
+}
+
+
+assertEquals(0, Xor1(1));
+assertEquals(3, Xor1Reversed(2));
+assertEquals(SMI_MIN + 1, Xor1(SMI_MIN));
+assertEquals(SMI_MIN + 1, Xor1Reversed(SMI_MIN));
+assertEquals(43, Xor1(OBJ_42));
+assertEquals(43, Xor1Reversed(OBJ_42));
+
+assertEquals(0, Xor100(100));
+assertEquals(7, Xor100Reversed(99));
+assertEquals(-2147483548, Xor100(SMI_MIN));
+assertEquals(-2147483548, Xor100Reversed(SMI_MIN));
+assertEquals(78, Xor100(OBJ_42));
+assertEquals(78, Xor100Reversed(OBJ_42));
+
+var x = 0x23; var y = 0x35;
+assertEquals(0x16, x ^ y);
+
+
+// Bitwise not.
+var v = 0;
+assertEquals(-1, ~v);
+v = SMI_MIN;
+assertEquals(0x7fffffff, ~v, "~smimin");
+v = SMI_MAX;
+assertEquals(-0x80000000, ~v, "~smimax");
+
+// Overflowing ++ and --.
+v = SMI_MAX;
+v++;
+assertEquals(0x80000000, v, "smimax++");
+v = SMI_MIN;
+v--;
+assertEquals(-0x80000001, v, "smimin--");
+
+// Check that comparisons of numbers separated by SMI_MIN work.
+assertFalse(SMI_MIN > 0);
+assertFalse(SMI_MIN + 1 > 1);
+assertFalse(SMI_MIN + 1 > 2);
+assertFalse(SMI_MIN + 2 > 1);
+assertFalse(0 < SMI_MIN);
+assertTrue(-1 < SMI_MAX);
+assertFalse(SMI_MAX < -1);
diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js
index 5836ddb..b57b86d 100644
--- a/test/mjsunit/regexp.js
+++ b/test/mjsunit/regexp.js
@@ -589,3 +589,61 @@
 assertEquals(false, desc.configurable);
 assertEquals(false, desc.enumerable);
 assertEquals(true, desc.writable);
+
+
+// Check that end-anchored regexps are optimized correctly.
+var re = /(?:a|bc)g$/;
+assertTrue(re.test("ag"));
+assertTrue(re.test("bcg"));
+assertTrue(re.test("abcg"));
+assertTrue(re.test("zimbag"));
+assertTrue(re.test("zimbcg"));
+
+assertFalse(re.test("g"));
+assertFalse(re.test(""));
+
+// Global regexp (non-zero start).
+var re = /(?:a|bc)g$/g;
+assertTrue(re.test("ag"));
+re.lastIndex = 1;  // Near start of string.
+assertTrue(re.test("zimbag"));
+re.lastIndex = 6;  // At end of string.
+assertFalse(re.test("zimbag"));
+re.lastIndex = 5;  // Near end of string.
+assertFalse(re.test("zimbag"));
+re.lastIndex = 4;
+assertTrue(re.test("zimbag"));
+
+// Anchored at both ends.
+var re = /^(?:a|bc)g$/g;
+assertTrue(re.test("ag"));
+re.lastIndex = 1;
+assertFalse(re.test("ag"));
+re.lastIndex = 1;
+assertFalse(re.test("zag"));
+
+// Long max_length of RegExp.
+var re = /VeryLongRegExp!{1,1000}$/;
+assertTrue(re.test("BahoolaVeryLongRegExp!!!!!!"));
+assertFalse(re.test("VeryLongRegExp"));
+assertFalse(re.test("!"));
+
+// End anchor inside disjunction.
+var re = /(?:a$|bc$)/;
+assertTrue(re.test("a"));
+assertTrue(re.test("bc"));
+assertTrue(re.test("abc"));
+assertTrue(re.test("zimzamzumba"));
+assertTrue(re.test("zimzamzumbc"));
+assertFalse(re.test("c"));
+assertFalse(re.test(""));
+
+// Only partially anchored.
+var re = /(?:a|bc$)/;
+assertTrue(re.test("a"));
+assertTrue(re.test("bc"));
+assertEquals(["a"], re.exec("abc"));
+assertEquals(4, re.exec("zimzamzumba").index);
+assertEquals(["bc"], re.exec("zimzomzumbc"));
+assertFalse(re.test("c"));
+assertFalse(re.test(""));
diff --git a/test/mjsunit/smi-ops.js b/test/mjsunit/smi-ops.js
index 499535c..8fa6fec 100644
--- a/test/mjsunit/smi-ops.js
+++ b/test/mjsunit/smi-ops.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-const SMI_MAX = (1 << 30) - 1;
-const SMI_MIN = -(1 << 30);
+const SMI_MAX = (1 << 29) - 1 + (1 << 29);  // Create without overflowing.
+const SMI_MIN = -SMI_MAX - 1;  // Create without overflowing.
 const ONE = 1;
 const ONE_HUNDRED = 100;
 
@@ -213,6 +213,15 @@
 v--;
 assertEquals(-0x40000001, v, "smimin--");
 
+// Check that comparisons of numbers separated by SMI_MIN work.
+assertFalse(SMI_MIN > 0);
+assertFalse(SMI_MIN + 1 > 1);
+assertFalse(SMI_MIN + 1 > 2);
+assertFalse(SMI_MIN + 2 > 1);
+assertFalse(0 < SMI_MIN);
+assertTrue(-1 < SMI_MAX);
+assertFalse(SMI_MAX < -1);
+
 // Not actually Smi operations.
 // Check that relations on unary ops work.
 var v = -1.2;
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index fd1aff3..17d556f 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -476,7 +476,6 @@
         '../../src/virtual-frame.cc',
         '../../src/virtual-frame.h',
         '../../src/vm-state-inl.h',
-        '../../src/vm-state.cc',
         '../../src/vm-state.h',
         '../../src/zone-inl.h',
         '../../src/zone.cc',
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
new file mode 100755
index 0000000..563084d
--- /dev/null
+++ b/tools/ll_prof.py
@@ -0,0 +1,955 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import bisect
+import collections
+import ctypes
+import mmap
+import optparse
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+
+
+USAGE="""usage: %prog [OPTION]...
+
+Analyses V8 and perf logs to produce profiles.
+
+Perf logs can be collected using a command like:
+  $ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
+  # -R: collect all data
+  # -e cycles: use cpu-cycles event (run "perf list" for details)
+  # -c 10000: write a sample after each 10000 events
+  # -f: force output file overwrite
+  # -i: limit profiling to our process and the kernel
+  # --ll-prof shell flag enables the right V8 logs
+This will produce a binary trace file (perf.data) that %prog can analyse.
+
+Examples:
+  # Print flat profile with annotated disassembly for the 10 top
+  # symbols. Use default log names and include the snapshot log.
+  $ %prog --snapshot --disasm-top=10
+
+  # Print flat profile with annotated disassembly for all used symbols.
+  # Use default log names and include kernel symbols into analysis.
+  $ %prog --disasm-all --kernel
+
+  # Print flat profile. Use custom log names.
+  $ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
+"""
+
+
+# Must match kGcFakeMmap.
+V8_GC_FAKE_MMAP = "/tmp/__v8_gc__"
+
+JS_ORIGIN = "js"
+JS_SNAPSHOT_ORIGIN = "js-snapshot"
+
+# Avoid using the slow (google-specific) wrapper around objdump.
+OBJDUMP_BIN = "/usr/bin/objdump"
+if not os.path.exists(OBJDUMP_BIN):
+  OBJDUMP_BIN = "objdump"
+
+
+class Code(object):
+  """Code object."""
+
+  _COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"]
+
+  _DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
+  _DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):.*")
+
+  # Keys must match constants in Logger::LogCodeInfo.
+  _ARCH_MAP = {
+    "ia32": "-m i386",
+    "x64": "-m i386 -M x86-64",
+    "arm": "-m arm"  # Not supported by our objdump build.
+  }
+
+  _id = 0
+
+  def __init__(self, name, start_address, end_address, origin, origin_offset):
+    self.id = Code._id
+    Code._id += 1
+    self.name = name
+    self.other_names = None
+    self.start_address = start_address
+    self.end_address = end_address
+    self.origin = origin
+    self.origin_offset = origin_offset
+    self.self_ticks = 0
+    self.self_ticks_map = None
+    self.callee_ticks = None
+
+  def AddName(self, name):
+    assert self.name != name
+    if self.other_names is None:
+      self.other_names = [name]
+      return
+    if name not in self.other_names:
+      self.other_names.append(name)
+
+  def FullName(self):
+    if self.other_names is None:
+      return self.name
+    self.other_names.sort()
+    return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
+
+  def IsUsed(self):
+    return self.self_ticks > 0 or self.callee_ticks is not None
+
+  def Tick(self, pc):
+    self.self_ticks += 1
+    if self.self_ticks_map is None:
+      self.self_ticks_map = collections.defaultdict(lambda: 0)
+    offset = pc - self.start_address
+    self.self_ticks_map[offset] += 1
+
+  def CalleeTick(self, callee):
+    if self.callee_ticks is None:
+      self.callee_ticks = collections.defaultdict(lambda: 0)
+    self.callee_ticks[callee] += 1
+
+  def PrintAnnotated(self, code_info, options):
+    if self.self_ticks_map is None:
+      ticks_map = []
+    else:
+      ticks_map = self.self_ticks_map.items()
+    # Convert the ticks map to offsets and counts arrays so that later
+    # we can do binary search in the offsets array.
+    ticks_map.sort(key=lambda t: t[0])
+    ticks_offsets = [t[0] for t in ticks_map]
+    ticks_counts = [t[1] for t in ticks_map]
+    # Get a list of disassembled lines and their addresses.
+    lines = []
+    for line in self._GetDisasmLines(code_info, options):
+      match = Code._DISASM_LINE_RE.match(line)
+      if match:
+        line_address = int(match.group(1), 16)
+        lines.append((line_address, line))
+    if len(lines) == 0:
+      return
+    # Print annotated lines.
+    address = lines[0][0]
+    total_count = 0
+    for i in xrange(len(lines)):
+      start_offset = lines[i][0] - address
+      if i == len(lines) - 1:
+        end_offset = self.end_address - self.start_address
+      else:
+        end_offset = lines[i + 1][0] - address
+      # Ticks (reported pc values) are not always precise, i.e. they do
+      # not necessarily point at instruction starts. So we have to search
+      # for ticks that touch the current instruction line.
+      j = bisect.bisect_left(ticks_offsets, end_offset)
+      count = 0
+      for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
+        if offset < start_offset:
+          break
+        count += cnt
+      total_count += count
+      count = 100.0 * count / self.self_ticks
+      if count >= 0.01:
+        print "%15.2f %s" % (count, lines[i][1])
+      else:
+        print "%s %s" % (" " * 15, lines[i][1])
+    print
+    assert total_count == self.self_ticks, \
+        "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
+
+  def __str__(self):
+    return "%s [0x%x, 0x%x) size: %d origin: %s" % (
+      self.name,
+      self.start_address,
+      self.end_address,
+      self.end_address - self.start_address,
+      self.origin)
+
+  def _GetDisasmLines(self, code_info, options):
+    tmp_name = None
+    if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
+      assert code_info.arch in Code._ARCH_MAP, \
+          "Unsupported architecture '%s'" % arch
+      arch_flags = Code._ARCH_MAP[code_info.arch]
+      # Create a temporary file just with this code object.
+      tmp_name = tempfile.mktemp(".v8code")
+      size = self.end_address - self.start_address
+      command = "dd if=%s.code of=%s bs=1 count=%d skip=%d && " \
+                "%s %s -D -b binary %s %s" % (
+        options.log, tmp_name, size, self.origin_offset,
+        OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), arch_flags,
+        tmp_name)
+    else:
+      command = "%s %s --start-address=%d --stop-address=%d -d %s " % (
+        OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS),
+        self.origin_offset,
+        self.origin_offset + self.end_address - self.start_address,
+        self.origin)
+    process = subprocess.Popen(command,
+                               shell=True,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    out, err = process.communicate()
+    lines = out.split("\n")
+    header_line = 0
+    for i, line in enumerate(lines):
+      if Code._DISASM_HEADER_RE.match(line):
+        header_line = i
+        break
+    if tmp_name:
+      os.unlink(tmp_name)
+    return lines[header_line + 1:]
+
+
+class CodePage(object):
+  """Group of adjacent code objects."""
+
+  SHIFT = 12  # 4K pages
+  SIZE = (1 << SHIFT)
+  MASK = ~(SIZE - 1)
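+  # Worked example of the page arithmetic (4K pages): for pc 0x12345,
+  # PageAddress(0x12345) == 0x12000, PageId(0x12345) == 0x12, and
+  # PageAddressFromId(0x12) == 0x12000.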
+
+  @staticmethod
+  def PageAddress(address):
+    return address & CodePage.MASK
+
+  @staticmethod
+  def PageId(address):
+    return address >> CodePage.SHIFT
+
+  @staticmethod
+  def PageAddressFromId(id):
+    return id << CodePage.SHIFT
+
+  def __init__(self, address):
+    self.address = address
+    self.code_objects = []
+
+  def Add(self, code):
+    self.code_objects.append(code)
+
+  def Remove(self, code):
+    self.code_objects.remove(code)
+
+  def Find(self, pc):
+    code_objects = self.code_objects
+    for i, code in enumerate(code_objects):
+      if code.start_address <= pc < code.end_address:
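+        # Move the hit to the front of the list: a simple move-to-front
+        # cache for repeated lookups of hot code on the same page.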
+        code_objects[0], code_objects[i] = code, code_objects[0]
+        return code
+    return None
+
+  def __iter__(self):
+    return self.code_objects.__iter__()
+
+
+class CodeMap(object):
+  """Code object map."""
+
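+  # Code objects are indexed by the 4K pages they span: an object is
+  # registered in every page it touches, and AllCode yields each object
+  # only from its first page to avoid duplicates.
+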
+  def __init__(self):
+    self.pages = {}
+    self.min_address = 1 << 64
+    self.max_address = -1
+
+  def Add(self, code, max_pages=-1):
+    page_id = CodePage.PageId(code.start_address)
+    limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
+    pages = 0
+    while page_id < limit_id:
+      if max_pages >= 0 and pages > max_pages:
+        print >>sys.stderr, \
+            "Warning: page limit (%d) reached for %s [%s]" % (
+            max_pages, code.name, code.origin)
+        break
+      if page_id in self.pages:
+        page = self.pages[page_id]
+      else:
+        page = CodePage(CodePage.PageAddressFromId(page_id))
+        self.pages[page_id] = page
+      page.Add(code)
+      page_id += 1
+      pages += 1
+    self.min_address = min(self.min_address, code.start_address)
+    self.max_address = max(self.max_address, code.end_address)
+
+  def Remove(self, code):
+    page_id = CodePage.PageId(code.start_address)
+    limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
+    removed = False
+    while page_id < limit_id:
+      if page_id not in self.pages:
+        page_id += 1
+        continue
+      page = self.pages[page_id]
+      page.Remove(code)
+      removed = True
+      page_id += 1
+    return removed
+
+  def AllCode(self):
+    for page in self.pages.itervalues():
+      for code in page:
+        if CodePage.PageAddress(code.start_address) == page.address:
+          yield code
+
+  def UsedCode(self):
+    for code in self.AllCode():
+      if code.IsUsed():
+        yield code
+
+  def Print(self):
+    for code in self.AllCode():
+      print code
+
+  def Find(self, pc):
+    if pc < self.min_address or pc >= self.max_address:
+      return None
+    page_id = CodePage.PageId(pc)
+    if page_id not in self.pages:
+      return None
+    return self.pages[page_id].Find(pc)
+
+
+class CodeInfo(object):
+  """Generic info about generated code objects."""
+
+  def __init__(self, arch, header_size):
+    self.arch = arch
+    self.header_size = header_size
+
+
+class CodeLogReader(object):
+  """V8 code event log reader."""
+
+  _CODE_INFO_RE = re.compile(
+    r"code-info,([^,]+),(\d+)")
+
+  _CODE_CREATE_RE = re.compile(
+    r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"([^\"]*)\"(?:,(\d+))?")
+
+  _CODE_MOVE_RE = re.compile(
+    r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)")
+
+  _CODE_DELETE_RE = re.compile(
+    r"code-delete,(0x[a-f0-9]+)")
+
+  _SNAPSHOT_POS_RE = re.compile(
+    r"snapshot-pos,(0x[a-f0-9]+),(\d+)")
+
+  _CODE_MOVING_GC = "code-moving-gc"
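+
+  # The regexps above match V8 log lines of these shapes (the addresses,
+  # sizes and positions are illustrative):
+  #   code-info,ia32,1
+  #   code-creation,LazyCompile,0xf675e080,340,"foo bar.js:1",1440
+  #   code-move,0xf675e080,0xf6780000
+  #   code-delete,0xf6780000
+  #   snapshot-pos,0xf675e080,18724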
+
+  def __init__(self, log_name, code_map, is_snapshot, snapshot_pos_to_name):
+    self.log = open(log_name, "r")
+    self.code_map = code_map
+    self.is_snapshot = is_snapshot
+    self.snapshot_pos_to_name = snapshot_pos_to_name
+    self.address_to_snapshot_name = {}
+
+  def ReadCodeInfo(self):
+    line = self.log.readline() or ""
+    match = CodeLogReader._CODE_INFO_RE.match(line)
+    assert match, "No code info in log"
+    return CodeInfo(arch=match.group(1), header_size=int(match.group(2)))
+
+  def ReadUpToGC(self, code_info):
+    made_progress = False
+    code_header_size = code_info.header_size
+    while True:
+      line = self.log.readline()
+      if not line:
+        return made_progress
+      made_progress = True
+
+      if line.startswith(CodeLogReader._CODE_MOVING_GC):
+        self.address_to_snapshot_name.clear()
+        return made_progress
+
+      match = CodeLogReader._CODE_CREATE_RE.match(line)
+      if match:
+        start_address = int(match.group(2), 16) + code_header_size
+        end_address = start_address + int(match.group(3)) - code_header_size
+        if start_address in self.address_to_snapshot_name:
+          name = self.address_to_snapshot_name[start_address]
+          origin = JS_SNAPSHOT_ORIGIN
+        else:
+          name = "%s:%s" % (match.group(1), match.group(4))
+          origin = JS_ORIGIN
+        if self.is_snapshot:
+          origin_offset = 0
+        else:
+          origin_offset = int(match.group(5))
+        code = Code(name, start_address, end_address, origin, origin_offset)
+        conflicting_code = self.code_map.Find(start_address)
+        if conflicting_code:
+          CodeLogReader._HandleCodeConflict(conflicting_code, code)
+          # TODO(vitalyr): this warning is too noisy because of our
+          # attempts to reconstruct code log from the snapshot.
+          # print >>sys.stderr, \
+          #     "Warning: Skipping duplicate code log entry %s" % code
+          continue
+        self.code_map.Add(code)
+        continue
+
+      match = CodeLogReader._CODE_MOVE_RE.match(line)
+      if match:
+        old_start_address = int(match.group(1), 16) + code_header_size
+        new_start_address = int(match.group(2), 16) + code_header_size
+        if old_start_address == new_start_address:
+          # Skip useless code move entries.
+          continue
+        code = self.code_map.Find(old_start_address)
+        if not code:
+          print >>sys.stderr, "Warning: Not found %x" % old_start_address
+          continue
+        assert code.start_address == old_start_address, \
+            "Inexact move address %x for %s" % (old_start_address, code)
+        self.code_map.Remove(code)
+        size = code.end_address - code.start_address
+        code.start_address = new_start_address
+        code.end_address = new_start_address + size
+        self.code_map.Add(code)
+        continue
+
+      match = CodeLogReader._CODE_DELETE_RE.match(line)
+      if match:
+        old_start_address = int(match.group(1), 16) + code_header_size
+        code = self.code_map.Find(old_start_address)
+        if not code:
+          print >>sys.stderr, "Warning: Not found %x" % old_start_address
+          continue
+        assert code.start_address == old_start_address, \
+            "Inexact delete address %x for %s" % (old_start_address, code)
+        self.code_map.Remove(code)
+        continue
+
+      match = CodeLogReader._SNAPSHOT_POS_RE.match(line)
+      if match:
+        start_address = int(match.group(1), 16) + code_header_size
+        snapshot_pos = int(match.group(2))
+        if self.is_snapshot:
+          code = self.code_map.Find(start_address)
+          if code:
+            assert code.start_address == start_address, \
+                "Inexact snapshot address %x for %s" % (start_address, code)
+            self.snapshot_pos_to_name[snapshot_pos] = code.name
+        else:
+          if snapshot_pos in self.snapshot_pos_to_name:
+            self.address_to_snapshot_name[start_address] = \
+                self.snapshot_pos_to_name[snapshot_pos]
+
+  def Dispose(self):
+    self.log.close()
+
+  @staticmethod
+  def _HandleCodeConflict(old_code, new_code):
+    assert (old_code.start_address == new_code.start_address and
+            old_code.end_address == new_code.end_address), \
+        "Conficting code log entries %s and %s" % (old_code, new_code)
+    CodeLogReader._UpdateNames(old_code, new_code)
+
+  @staticmethod
+  def _UpdateNames(old_code, new_code):
+    if old_code.name == new_code.name:
+      return
+    # Kludge: there are code objects with custom names that don't
+    # match their flags.
+    misnamed_code = set(["Builtin:CpuFeatures::Probe"])
+    if old_code.name in misnamed_code:
+      return
+    # Code object may be shared by a few functions. Collect the full
+    # set of names.
+    old_code.AddName(new_code.name)
+
+
+class Descriptor(object):
+  """Descriptor of a structure in the binary trace log."""
+
+  CTYPE_MAP = {
+    "u16": ctypes.c_uint16,
+    "u32": ctypes.c_uint32,
+    "u64": ctypes.c_uint64
+  }
+
+  def __init__(self, fields):
+    class TraceItem(ctypes.Structure):
+      _fields_ = Descriptor.CtypesFields(fields)
+
+      def __str__(self):
+        return ", ".join("%s: %s" % (field, self.__getattribute__(field))
+                         for field, _ in TraceItem._fields_)
+
+    self.ctype = TraceItem
+
+  def Read(self, trace, offset):
+    return self.ctype.from_buffer(trace, offset)
+
+  @staticmethod
+  def CtypesFields(fields):
+    return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
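+
+# A minimal usage sketch of Descriptor (the field layout here is
+# hypothetical, not a real perf record):
+#   desc = Descriptor([("pid", "u32"), ("time", "u64")])
+#   item = desc.Read(trace_buffer, 0)  # ctypes view into the buffer
+#   print item.pid, item.time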
+
+
+# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
+# for the gory details.
+
+
+TRACE_HEADER_DESC = Descriptor([
+  ("magic", "u64"),
+  ("size", "u64"),
+  ("attr_size", "u64"),
+  ("attrs_offset", "u64"),
+  ("attrs_size", "u64"),
+  ("data_offset", "u64"),
+  ("data_size", "u64"),
+  ("event_types_offset", "u64"),
+  ("event_types_size", "u64")
+])
+
+
+PERF_EVENT_ATTR_DESC = Descriptor([
+  ("type", "u32"),
+  ("size", "u32"),
+  ("config", "u64"),
+  ("sample_period_or_freq", "u64"),
+  ("sample_type", "u64"),
+  ("read_format", "u64"),
+  ("flags", "u64"),
+  ("wakeup_events_or_watermark", "u32"),
+  ("bt_type", "u32"),
+  ("bp_addr", "u64"),
+  ("bp_len", "u64"),
+])
+
+
+PERF_EVENT_HEADER_DESC = Descriptor([
+  ("type", "u32"),
+  ("misc", "u16"),
+  ("size", "u16")
+])
+
+
+PERF_MMAP_EVENT_BODY_DESC = Descriptor([
+  ("pid", "u32"),
+  ("tid", "u32"),
+  ("addr", "u64"),
+  ("len", "u64"),
+  ("pgoff", "u64")
+])
+
+
+# perf_event_attr.sample_type bits control the set of
+# perf_sample_event fields.
+PERF_SAMPLE_IP = 1 << 0
+PERF_SAMPLE_TID = 1 << 1
+PERF_SAMPLE_TIME = 1 << 2
+PERF_SAMPLE_ADDR = 1 << 3
+PERF_SAMPLE_READ = 1 << 4
+PERF_SAMPLE_CALLCHAIN = 1 << 5
+PERF_SAMPLE_ID = 1 << 6
+PERF_SAMPLE_CPU = 1 << 7
+PERF_SAMPLE_PERIOD = 1 << 8
+PERF_SAMPLE_STREAM_ID = 1 << 9
+PERF_SAMPLE_RAW = 1 << 10
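+
+# For example, a sample_type of PERF_SAMPLE_IP | PERF_SAMPLE_TID selects
+# only the ip, pid and tid fields from the list below.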
+
+
+PERF_SAMPLE_EVENT_BODY_FIELDS = [
+  ("ip", "u64", PERF_SAMPLE_IP),
+  ("pid", "u32", PERF_SAMPLE_TID),
+  ("tid", "u32", PERF_SAMPLE_TID),
+  ("time", "u64", PERF_SAMPLE_TIME),
+  ("addr", "u64", PERF_SAMPLE_ADDR),
+  ("id", "u64", PERF_SAMPLE_ID),
+  ("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
+  ("cpu", "u32", PERF_SAMPLE_CPU),
+  ("res", "u32", PERF_SAMPLE_CPU),
+  ("period", "u64", PERF_SAMPLE_PERIOD),
+  # We don't handle the read format, which comes after the period,
+  # precedes the callchain, and has a variable size.
+  ("nr", "u64", PERF_SAMPLE_CALLCHAIN)
+  # Raw data follows the callchain and is ignored.
+]
+
+
+PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
+
+
+PERF_RECORD_MMAP = 1
+PERF_RECORD_SAMPLE = 9
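+# Record type values from the kernel's perf_event.h; only mmap and
+# sample records are interpreted by this script, all others are skipped.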
+
+
+class TraceReader(object):
+  """Perf (linux-2.6/tools/perf) trace file reader."""
+
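+  # The magic constant below is the ASCII string "PERFFILE" interpreted
+  # as a little-endian u64.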
+  _TRACE_HEADER_MAGIC = 4993446653023372624
+
+  def __init__(self, trace_name):
+    self.trace_file = open(trace_name, "r")
+    self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
+    self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
+    if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
+      print >>sys.stderr, "Warning: unsupported trace header magic"
+    self.offset = self.trace_header.data_offset
+    self.limit = self.trace_header.data_offset + self.trace_header.data_size
+    assert self.limit <= self.trace.size(), \
+        "Trace data limit exceeds trace file size"
+    self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
+    assert self.trace_header.attrs_size != 0, \
+        "No perf event attributes found in the trace"
+    perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
+                                                self.trace_header.attrs_offset)
+    self.sample_event_body_desc = self._SampleEventBodyDesc(
+        perf_event_attr.sample_type)
+    self.callchain_supported = \
+        (perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
+    if self.callchain_supported:
+      self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
+      self.ip_size = ctypes.sizeof(self.ip_struct)
+
+  def ReadEventHeader(self):
+    if self.offset >= self.limit:
+      return None, 0
+    offset = self.offset
+    header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
+    self.offset += header.size
+    return header, offset
+
+  def ReadMmap(self, header, offset):
+    mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
+                                               offset + self.header_size)
+    # Read null-padded filename.
+    filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+                          offset + header.size].rstrip(chr(0))
+    mmap_info.filename = filename
+    return mmap_info
+
+  def ReadSample(self, header, offset):
+    sample = self.sample_event_body_desc.Read(self.trace,
+                                              offset + self.header_size)
+    if not self.callchain_supported:
+      return sample
+    sample.ips = []
+    offset += self.header_size + ctypes.sizeof(sample)
+    for _ in xrange(sample.nr):
+      sample.ips.append(
+        self.ip_struct.from_buffer(self.trace, offset).value)
+      offset += self.ip_size
+    return sample
+
+  def Dispose(self):
+    self.trace.close()
+    self.trace_file.close()
+
+  def _SampleEventBodyDesc(self, sample_type):
+    assert (sample_type & PERF_SAMPLE_READ) == 0, \
+           "Can't hande read format in samples"
+    fields = [(field, format)
+              for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
+              if (bit & sample_type) != 0]
+    return Descriptor(fields)
+
+
+OBJDUMP_SECTION_HEADER_RE = re.compile(
+  r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
+OBJDUMP_SYMBOL_LINE_RE = re.compile(
+  r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
+OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
+   r"^DYNAMIC SYMBOL TABLE")
+KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
+PERF_KERNEL_ALLSYMS_RE = re.compile(
+  r".*kallsyms.*")
+KERNEL_ALLSYMS_LINE_RE = re.compile(
+  r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
+
+
+class LibraryRepo(object):
+  def __init__(self):
+    self.infos = []
+    self.names = set()
+    self.ticks = {}
+
+  def Load(self, mmap_info, code_map, options):
+    # Unless kernel entries were requested, skip kernel mmaps; they can
+    # be recognized by their tid being 0.
+    if mmap_info.tid == 0 and not options.kernel:
+      return True
+    if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
+      return self._LoadKernelSymbols(code_map)
+    self.infos.append(mmap_info)
+    mmap_info.ticks = 0
+    mmap_info.unique_name = self._UniqueMmapName(mmap_info)
+    if not os.path.exists(mmap_info.filename):
+      return True
+    # Request section headers (-h), symbols (-t), and dynamic symbols
+    # (-T) from objdump.
+    # Unfortunately, section headers span two lines, so we have to
+    # remember the section name from the first line of each header in
+    # the after_section variable.
+    process = subprocess.Popen(
+      "%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
+      shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    pipe = process.stdout
+    after_section = None
+    code_sections = set()
+    reloc_sections = set()
+    dynamic = False
+    try:
+      for line in pipe:
+        if after_section:
+          if line.find("CODE") != -1:
+            code_sections.add(after_section)
+          if line.find("RELOC") != -1:
+            reloc_sections.add(after_section)
+          after_section = None
+          continue
+
+        match = OBJDUMP_SECTION_HEADER_RE.match(line)
+        if match:
+          after_section = match.group(1)
+          continue
+
+        if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
+          dynamic = True
+          continue
+
+        match = OBJDUMP_SYMBOL_LINE_RE.match(line)
+        if match:
+          start_address = int(match.group(1), 16)
+          origin_offset = start_address
+          flags = match.group(2)
+          section = match.group(3)
+          if section in code_sections:
+            if dynamic or section in reloc_sections:
+              start_address += mmap_info.addr
+            size = int(match.group(4), 16)
+            name = match.group(5)
+            origin = mmap_info.filename
+            code_map.Add(Code(name, start_address, start_address + size,
+                              origin, origin_offset))
+    finally:
+      pipe.close()
+    assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
+
+  def Tick(self, pc):
+    for i, mmap_info in enumerate(self.infos):
+      if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
+        mmap_info.ticks += 1
+        self.infos[0], self.infos[i] = mmap_info, self.infos[0]
+        return True
+    return False
+
+  def _UniqueMmapName(self, mmap_info):
+    name = mmap_info.filename
+    index = 1
+    while name in self.names:
+      name = "%s-%d" % (mmap_info.filename, index)
+      index += 1
+    self.names.add(name)
+    return name
+
+  def _LoadKernelSymbols(self, code_map):
+    if not os.path.exists(KERNEL_ALLSYMS_FILE):
+      print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
+      return False
+    kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
+    code = None
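+    # kallsyms entries carry no size, so a symbol is only added once the
+    # next one is seen and its start address can serve as the end address
+    # of the previous symbol.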
+    for line in kallsyms:
+      match = KERNEL_ALLSYMS_LINE_RE.match(line)
+      if match:
+        start_address = int(match.group(1), 16)
+        end_address = start_address
+        name = match.group(2)
+        if code:
+          code.end_address = start_address
+          code_map.Add(code, 16)
+        code = Code(name, start_address, end_address, "kernel", 0)
+    return True
+
+
+def PrintReport(code_map, library_repo, code_info, options):
+  print "Ticks per symbol:"
+  used_code = [code for code in code_map.UsedCode()]
+  used_code.sort(key=lambda x: x.self_ticks, reverse=True)
+  for i, code in enumerate(used_code):
+    print "%10d %s [%s]" % (code.self_ticks, code.FullName(), code.origin)
+    if options.disasm_all or i < options.disasm_top:
+      code.PrintAnnotated(code_info, options)
+  print
+  print "Ticks per library:"
+  mmap_infos = [m for m in library_repo.infos]
+  mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
+  for mmap_info in mmap_infos:
+    print "%10d %s" % (mmap_info.ticks, mmap_info.unique_name)
+
+
+def PrintDot(code_map, options):
+  print "digraph G {"
+  for code in code_map.UsedCode():
+    if code.self_ticks < 10:
+      continue
+    print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
+    if code.callee_ticks:
+      for callee, ticks in code.callee_ticks.iteritems():
+        print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
+  print "}"
+
+
+if __name__ == "__main__":
+  parser = optparse.OptionParser(USAGE)
+  parser.add_option("--snapshot-log",
+                    default="obj/release/snapshot.log",
+                    help="V8 snapshot log file name [default: %default]")
+  parser.add_option("--log",
+                    default="v8.log",
+                    help="V8 log file name [default: %default]")
+  parser.add_option("--snapshot",
+                    default=False,
+                    action="store_true",
+                    help="process V8 snapshot log [default: %default]")
+  parser.add_option("--trace",
+                    default="perf.data",
+                    help="perf trace file name [default: %default]")
+  parser.add_option("--kernel",
+                    default=False,
+                    action="store_true",
+                    help="process kernel entries [default: %default]")
+  parser.add_option("--disasm-top",
+                    default=0,
+                    type="int",
+                    help=("number of top symbols to disassemble and annotate "
+                          "[default: %default]"))
+  parser.add_option("--disasm-all",
+                    default=False,
+                    action="store_true",
+                    help=("disassemble and annotate all used symbols "
+                          "[default: %default]"))
+  parser.add_option("--dot",
+                    default=False,
+                    action="store_true",
+                    help="produce dot output (WIP) [default: %default]")
+  parser.add_option("--quiet", "-q",
+                    default=False,
+                    action="store_true",
+                    help="no auxiliary messages [default: %default]")
+  options, args = parser.parse_args()
+
+  if not options.quiet:
+    if options.snapshot:
+      print "V8 logs: %s, %s, %s.code" % (options.snapshot_log,
+                                          options.log,
+                                          options.log)
+    else:
+      print "V8 log: %s, %s.code (no snapshot)" % (options.log, options.log)
+    print "Perf trace file: %s" % options.trace
+
+  # Stats.
+  events = 0
+  ticks = 0
+  missed_ticks = 0
+  really_missed_ticks = 0
+  mmap_time = 0
+  sample_time = 0
+
+  # Initialize the log reader and get the code info.
+  code_map = CodeMap()
+  snapshot_name_map = {}
+  log_reader = CodeLogReader(log_name=options.log,
+                             code_map=code_map,
+                             is_snapshot=False,
+                             snapshot_pos_to_name=snapshot_name_map)
+  code_info = log_reader.ReadCodeInfo()
+  if not options.quiet:
+    print "Generated code architecture: %s" % code_info.arch
+    print
+
+  # Process the snapshot log to fill the snapshot name map.
+  if options.snapshot:
+    snapshot_log_reader = CodeLogReader(log_name=options.snapshot_log,
+                                        code_map=CodeMap(),
+                                        is_snapshot=True,
+                                        snapshot_pos_to_name=snapshot_name_map)
+    while snapshot_log_reader.ReadUpToGC(code_info):
+      pass
+
+  # Process the code and trace logs.
+  library_repo = LibraryRepo()
+  log_reader.ReadUpToGC(code_info)
+  trace_reader = TraceReader(options.trace)
+  while True:
+    header, offset = trace_reader.ReadEventHeader()
+    if not header:
+      break
+    events += 1
+    if header.type == PERF_RECORD_MMAP:
+      start = time.time()
+      mmap_info = trace_reader.ReadMmap(header, offset)
+      if mmap_info.filename == V8_GC_FAKE_MMAP:
+        log_reader.ReadUpToGC(code_info)
+      else:
+        library_repo.Load(mmap_info, code_map, options)
+      mmap_time += time.time() - start
+    elif header.type == PERF_RECORD_SAMPLE:
+      ticks += 1
+      start = time.time()
+      sample = trace_reader.ReadSample(header, offset)
+      code = code_map.Find(sample.ip)
+      if code:
+        code.Tick(sample.ip)
+      else:
+        missed_ticks += 1
+      if not library_repo.Tick(sample.ip) and not code:
+        really_missed_ticks += 1
+      if trace_reader.callchain_supported:
+        for ip in sample.ips:
+          caller_code = code_map.Find(ip)
+          if caller_code:
+            if code:
+              caller_code.CalleeTick(code)
+            code = caller_code
+      sample_time += time.time() - start
+
+  if options.dot:
+    PrintDot(code_map, options)
+  else:
+    PrintReport(code_map, library_repo, code_info, options)
+
+    if not options.quiet:
+      print
+      print "Stats:"
+      print "%10d total trace events" % events
+      print "%10d total ticks" % ticks
+      print "%10d ticks not in symbols" % missed_ticks
+      print "%10d unaccounted ticks" % really_missed_ticks
+      print "%10d total symbols" % len([c for c in code_map.AllCode()])
+      print "%10d used symbols" % len([c for c in code_map.UsedCode()])
+      print "%9.2fs library processing time" % mmap_time
+      print "%9.2fs tick processing time" % sample_time
+
+  log_reader.Dispose()
+  trace_reader.Dispose()
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 47152f8..08558cc 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -220,8 +220,6 @@
 		9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
 		9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
 		9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
-		9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA37333116DD9F000C4CD55 /* vm-state.cc */; };
-		9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA37333116DD9F000C4CD55 /* vm-state.cc */; };
 		9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
 		9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
 		9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
@@ -590,7 +588,6 @@
 		9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
 		9FA36F62116BA26500C4CD55 /* v8-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-profiler.h"; sourceTree = "<group>"; };
 		9FA37332116DD9F000C4CD55 /* vm-state-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state-inl.h"; sourceTree = "<group>"; };
-		9FA37333116DD9F000C4CD55 /* vm-state.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "vm-state.cc"; sourceTree = "<group>"; };
 		9FA37334116DD9F000C4CD55 /* vm-state.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state.h"; sourceTree = "<group>"; };
 		9FA38B9B1175B2D200C4CD55 /* cached-powers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cached-powers.h"; sourceTree = "<group>"; };
 		9FA38B9C1175B2D200C4CD55 /* data-flow.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "data-flow.cc"; sourceTree = "<group>"; };
@@ -1013,7 +1010,6 @@
 				58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
 				58950D5B0F55514900F3E8BA /* virtual-frame.h */,
 				9FA37332116DD9F000C4CD55 /* vm-state-inl.h */,
-				9FA37333116DD9F000C4CD55 /* vm-state.cc */,
 				9FA37334116DD9F000C4CD55 /* vm-state.h */,
 				897FF1A10E719B8F00D62E90 /* zone-inl.h */,
 				897FF1A20E719B8F00D62E90 /* zone.cc */,
@@ -1377,7 +1373,6 @@
 				9FA38BC71175B2E500C4CD55 /* virtual-frame-ia32.cc in Sources */,
 				58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
 				58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
-				9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */,
 				89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
 				C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
 			);
@@ -1503,7 +1498,6 @@
 				58950D690F5551CE00F3E8BA /* virtual-frame-light.cc in Sources */,
 				58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
 				58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
-				9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */,
 				89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
 				C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
 			);
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 6de6223..62d4501 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -1082,10 +1082,6 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\vm-state.cc"
-				>
-			</File>
-			<File
 				RelativePath="..\..\src\vm-state-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index 12190e2..4f9ff4c 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -1060,10 +1060,6 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\vm-state.cc"
-				>
-			</File>
-			<File
 				RelativePath="..\..\src\vm-state-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 4de6150..c84bce2 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -1042,10 +1042,6 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\vm-state.cc"
-				>
-			</File>
-			<File
 				RelativePath="..\..\src\vm-state-inl.h"
 				>
 			</File>