Merge "Change shadow_frame_size_ to num_shadow_frame_refs_. Fix LLVM build." into dalvik-dev
diff --git a/src/check_jni.cc b/src/check_jni.cc
index e7590d3..7c5186a 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -208,6 +208,7 @@
         // obj will be NULL.  Otherwise, obj should always be non-NULL
         // and valid.
         if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
+          Runtime::Current()->GetHeap()->DumpSpaces();
           JniAbortF(function_name_, "field operation on invalid %s: %p",
                     ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
           return;
@@ -244,6 +245,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Object* o = soa_.Decode<Object*>(java_object);
     if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "field operation on invalid %s: %p",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
       return;
@@ -623,6 +625,7 @@
 
     Object* obj = soa_.Decode<Object*>(java_object);
     if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
                 what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
       return false;
@@ -676,6 +679,7 @@
 
     Array* a = soa_.Decode<Array*>(java_array);
     if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
     } else if (!a->IsArrayInstance()) {
@@ -696,6 +700,7 @@
     }
     Field* f = soa_.DecodeField(fid);
     if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || !f->IsField()) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "invalid jfieldID: %p", fid);
       return NULL;
     }
@@ -709,6 +714,7 @@
     }
     AbstractMethod* m = soa_.DecodeMethod(mid);
     if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsMethod()) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "invalid jmethodID: %p", mid);
       return NULL;
     }
@@ -729,6 +735,7 @@
 
     Object* o = soa_.Decode<Object*>(java_object);
     if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+      Runtime::Current()->GetHeap()->DumpSpaces();
       // TODO: when we remove work_around_app_jni_bugs, this should be impossible.
       JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
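
Every check_jni.cc hunk applies the same debugging aid: dump the heap's spaces before JniAbortF() terminates the process, so the invalid pointer in the abort message can be compared against the logged space ranges. A minimal sketch of the pattern as it appears above (the enclosing checker class is elided; names are taken from the diff):

    Object* o = soa_.Decode<Object*>(java_object);
    if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
      // Log each space's [begin, end) range first; the pointer printed by
      // the abort below can then be matched (or not) against a space.
      Runtime::Current()->GetHeap()->DumpSpaces();
      JniAbortF(function_name_, "field operation on invalid %s: %p",
                ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
                java_object);
    }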
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 83661cb..0903781 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -222,6 +222,7 @@
       class_roots_(NULL),
       array_iftable_(NULL),
       init_done_(false),
+      is_dirty_(false),
       intern_table_(intern_table) {
   CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
 }
@@ -1043,7 +1044,7 @@
 // Keep in sync with InitCallback. Anything we visit, we need to
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
-void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
   visitor(class_roots_, arg);
   Thread* self = Thread::Current();
   {
@@ -1065,6 +1066,7 @@
   }
 
   visitor(array_iftable_, arg);
+  is_dirty_ = false;
 }
 
 void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
@@ -1746,6 +1748,7 @@
   CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()));
   dex_caches_.push_back(dex_cache.get());
   dex_cache->SetDexFile(&dex_file);
+  Dirty();
 }
 
 void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
@@ -1990,6 +1993,7 @@
     return existing;
   }
   classes.insert(std::make_pair(hash, klass));
+  Dirty();
   return NULL;
 }
 
diff --git a/src/class_linker.h b/src/class_linker.h
index 096d602..460fcd2 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -262,7 +262,7 @@
   void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
 
-  void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
+  void VisitRoots(Heap::RootVisitor* visitor, void* arg)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
 
   DexCache* FindDexCache(const DexFile& dex_file) const
@@ -378,6 +378,14 @@
   pid_t GetClassesLockOwner(); // For SignalCatcher.
   pid_t GetDexLockOwner(); // For SignalCatcher.
 
+  bool IsDirty() const {
+    return is_dirty_;
+  }
+
+  void Dirty() {
+    is_dirty_ = true;
+  }
+
  private:
   explicit ClassLinker(InternTable*);
 
@@ -636,6 +644,7 @@
   IfTable* array_iftable_;
 
   bool init_done_;
+  bool is_dirty_;
 
   InternTable* intern_table_;
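
The IsDirty()/Dirty() pair is the heart of this change. The sketch below is a hypothetical, self-contained illustration (not part of the diff) of the protocol ClassLinker and InternTable now follow: mutators set the flag when they add a root, the GC's visit clears it, and a clean table can be skipped when roots are re-scanned within the same collection.

    #include <cstddef>
    #include <vector>

    class Object;  // Stand-in for the ART heap object type.
    typedef void RootVisitor(Object* root, void* arg);

    // Smallest container that follows the same dirty-flag protocol.
    class DirtyRootSet {
     public:
      DirtyRootSet() : is_dirty_(false) {}

      void Add(Object* root) {
        roots_.push_back(root);
        is_dirty_ = true;  // A new root appeared; the next visit must run.
      }

      void VisitRoots(RootVisitor* visitor, void* arg) {
        for (size_t i = 0; i < roots_.size(); ++i) {
          visitor(roots_[i], arg);
        }
        is_dirty_ = false;  // Every current root has been reported to the GC.
      }

      bool IsDirty() const { return is_dirty_; }

     private:
      std::vector<Object*> roots_;
      bool is_dirty_;
    };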
 
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index 72c6c73..b066dd5 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -267,4 +267,10 @@
   return reinterpret_cast<Object*>(addr);
 }
 
+void FreeListSpace::Dump(std::ostream& os) const {
+  os << GetName() << " -"
+     << " begin: " << reinterpret_cast<void*>(Begin())
+     << " end: " << reinterpret_cast<void*>(End());
+}
+
 }
diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h
index 2bf6abf..979fce6 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/large_object_space.h
@@ -131,6 +131,9 @@
   size_t Size() const {
     return End() - Begin();
   }
+
+  virtual void Dump(std::ostream& os) const;
+
  private:
   static const size_t kAlignment = kPageSize;
 
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 03bbb6a..da6a593 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -79,6 +79,8 @@
   FindDefaultMarkBitmap();
   // TODO: if concurrent, enable card marking in compiler
   // TODO: check that the mark bitmap is entirely clear.
+  // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
+  Runtime::Current()->DirtyRoots();
 }
 
 void MarkSweep::FindDefaultMarkBitmap() {
@@ -177,7 +179,7 @@
 
 void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method) {
   // See if the root is on any space bitmap.
-  if (heap_->FindSpaceFromObject(root) == NULL) {
+  if (GetHeap()->GetLiveBitmap()->GetSpaceBitmap(root) == NULL) {
     LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
     if (large_object_space->Contains(root)) {
       LOG(ERROR) << "Found invalid root: " << root;
@@ -195,7 +197,11 @@
 
 // Marks all objects in the root set.
 void MarkSweep::MarkRoots() {
-  Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
+  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectVisitor, this);
+}
+
+void MarkSweep::MarkConcurrentRoots() {
+  Runtime::Current()->VisitConcurrentRoots(MarkObjectVisitor, this);
 }
 
 class CheckObjectVisitor {
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 76c5428..ed74f99 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -52,6 +52,9 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void MarkConcurrentRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
   // Verify that image roots point to only marked objects within the alloc space.
   void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
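
Splitting root marking into non-concurrent and concurrent halves is what shrinks the pause: only roots whose owners tolerate concurrent scanning (the intern table and class linker, per runtime.cc below) move out of the stop-the-world window. A hedged sketch of the concurrent collection's phase order, condensed from the heap.cc hunks into one hypothetical driver function (the Suspend/Resume calls are stand-ins for the ThreadList machinery):

    void DriveConcurrentMarkSweep(Runtime* runtime, MarkSweep* mark_sweep) {
      runtime->DirtyRoots();              // Every concurrent root set must be
                                          // scanned at least once per GC.
      SuspendAllMutators();               // Stand-in: stop the world.
      mark_sweep->MarkRoots();            // Stacks, JNI, debugger: pause-only.
      ResumeAllMutators();                // Stand-in: restart the world.
      mark_sweep->MarkConcurrentRoots();  // Dirty intern/class roots, while
                                          // mutators run; a table mutated
                                          // after this re-dirties itself.
    }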
 
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 4d5ce93..a7a5942 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -434,7 +434,6 @@
     new_size = current_space_size;
   }
   mspace_set_footprint_limit(mspace_, new_size);
-  LOG(INFO) << "Setting footprint limit to " << new_size;
 }
 
 size_t ImageSpace::bitmap_index_ = 0;
diff --git a/src/heap.cc b/src/heap.cc
index 98845d8..84030ba 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -536,7 +536,9 @@
               << live_bitmap << " " << *live_bitmap << "\n"
               << mark_bitmap << " " << *mark_bitmap;
   }
-  // TODO: Dump large object space?
+  if (large_object_space_.get() != NULL) {
+    large_object_space_->Dump(LOG(INFO));
+  }
 }
 
 void Heap::VerifyObjectBody(const Object* obj) {
@@ -1045,6 +1047,7 @@
     mark_sweep.FindDefaultMarkBitmap();
 
     mark_sweep.MarkRoots();
+    mark_sweep.MarkConcurrentRoots();
     timings.AddSplit("MarkRoots");
 
     // Roots are marked on the bitmap and the mark_stack is empty.
@@ -1458,9 +1461,13 @@
     if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
         (gc_type == kGcTypeFull &&
             space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
-      live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
-      mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
-      space->AsAllocSpace()->SwapBitmaps();
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (live_bitmap != mark_bitmap) {
+        live_bitmap_->ReplaceBitmap(live_bitmap, mark_bitmap);
+        mark_bitmap_->ReplaceBitmap(mark_bitmap, live_bitmap);
+        space->AsAllocSpace()->SwapBitmaps();
+      }
     }
   }
   SwapLargeObjects();
@@ -1615,6 +1622,10 @@
       root_end = NanoTime();
       timings.AddSplit("RootEnd");
 
+      // Mark the roots which we can do concurrently.
+      mark_sweep.MarkConcurrentRoots();
+      timings.AddSplit("MarkConcurrentRoots");
+
       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
       UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
 
@@ -1695,11 +1706,12 @@
 
     if (verify_post_gc_heap_) {
       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      SwapBitmaps(gc_type);
+      // Swapping bound bitmaps does nothing.
+      SwapBitmaps(kGcTypeFull);
       if (!VerifyHeapReferences()) {
         LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
       }
-      SwapBitmaps(gc_type);
+      SwapBitmaps(kGcTypeFull);
       timings.AddSplit("VerifyHeapReferencesPostGC");
     }
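
The SwapBitmaps() guard deserves a note: a space whose bitmaps are bound uses a single SpaceBitmap as both live and mark bitmap, so for it the ReplaceBitmap()/SwapBitmaps() sequence would be wrong rather than merely redundant. That is the guard the hunk adds, and (as an assumption drawn from the "Swapping bound bitmaps does nothing" comment) it is why kGcTypeFull is now safe to pass unconditionally in the verification path:

    SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (live_bitmap != mark_bitmap) {  // Bound bitmaps share one object.
      live_bitmap_->ReplaceBitmap(live_bitmap, mark_bitmap);
      mark_bitmap_->ReplaceBitmap(mark_bitmap, live_bitmap);
      space->AsAllocSpace()->SwapBitmaps();
    }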
 
diff --git a/src/intern_table.cc b/src/intern_table.cc
index 5ad3958..817ce1e 100644
--- a/src/intern_table.cc
+++ b/src/intern_table.cc
@@ -21,7 +21,7 @@
 
 namespace art {
 
-InternTable::InternTable() : intern_table_lock_("InternTable lock") {
+InternTable::InternTable() : intern_table_lock_("InternTable lock"), is_dirty_(false) {
 }
 
 size_t InternTable::Size() const {
@@ -36,12 +36,13 @@
      << image_strong_interns_.size() << " image strong\n";
 }
 
-void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void InternTable::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
   MutexLock mu(Thread::Current(), intern_table_lock_);
   typedef Table::const_iterator It; // TODO: C++0x auto
   for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
     visitor(it->second, arg);
   }
+  is_dirty_ = false;
   // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
 }
 
@@ -97,6 +98,9 @@
       return image;
     }
 
+    // Mark as dirty so that we rescan the roots.
+    Dirty();
+
     // There is no match in the strong table, check the weak table.
     String* weak = Lookup(weak_interns_, s, hash_code);
     if (weak != NULL) {
diff --git a/src/intern_table.h b/src/intern_table.h
index 6f56773..93d20b2 100644
--- a/src/intern_table.h
+++ b/src/intern_table.h
@@ -65,10 +65,15 @@
 
   size_t Size() const;
 
-  void VisitRoots(Heap::RootVisitor* visitor, void* arg) const;
+  void VisitRoots(Heap::RootVisitor* visitor, void* arg);
 
   void DumpForSigQuit(std::ostream& os) const;
 
+  bool IsDirty() const { return is_dirty_; }
+  void Dirty() {
+    is_dirty_ = true;
+  }
+
  private:
   typedef std::multimap<int32_t, String*> Table;
 
@@ -81,6 +86,7 @@
   void Remove(Table& table, const String* s, uint32_t hash_code);
 
   mutable Mutex intern_table_lock_;
+  bool is_dirty_;
   Table image_strong_interns_ GUARDED_BY(intern_table_lock_);
   Table strong_interns_ GUARDED_BY(intern_table_lock_);
   Table weak_interns_ GUARDED_BY(intern_table_lock_);
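
Together with the class_linker.h hunk, both concurrent root owners now expose the same IsDirty()/Dirty() surface. A caller-side sketch (this mirrors Runtime::VisitConcurrentRoots() in the runtime.cc hunk below):

    // Skip a clean table; VisitRoots() itself clears the flag, so the
    // next call is a no-op unless a new string has been interned since.
    if (intern_table_->IsDirty()) {
      intern_table_->VisitRoots(visitor, arg);
    }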
diff --git a/src/runtime.cc b/src/runtime.cc
index f93d687..4b7338b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -993,10 +993,17 @@
   thread_list_->Unregister(self);
 }
 
-void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
+void Runtime::VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg) {
+  if (intern_table_->IsDirty()) {
+    intern_table_->VisitRoots(visitor, arg);
+  }
+  if (class_linker_->IsDirty()) {
+    class_linker_->VisitRoots(visitor, arg);
+  }
+}
+
+void Runtime::VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg) {
   Dbg::VisitRoots(visitor, arg);
-  class_linker_->VisitRoots(visitor, arg);
-  intern_table_->VisitRoots(visitor, arg);
   java_vm_->VisitRoots(visitor, arg);
   thread_list_->VisitRoots(visitor, arg);
   if (pre_allocated_OutOfMemoryError_ != NULL) {
@@ -1013,6 +1020,16 @@
   }
 }
 
+void Runtime::DirtyRoots() {
+  intern_table_->Dirty();
+  class_linker_->Dirty();
+}
+
+void Runtime::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+  VisitConcurrentRoots(visitor, arg);
+  VisitNonConcurrentRoots(visitor, arg);
+}
+
 void Runtime::SetJniDlsymLookupStub(ByteArray* jni_stub_array) {
   CHECK(jni_stub_array != NULL)  << " jni_stub_array=" << jni_stub_array;
   CHECK(jni_stub_array_ == NULL || jni_stub_array_ == jni_stub_array)
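
VisitRoots() is kept as the union of the two new visitors, so existing stop-the-world callers are unaffected; only the collector calls the halves separately. One behavioral consequence worth spelling out: because VisitConcurrentRoots() consults IsDirty(), calling it twice in one collection visits an unchanged table only once. A hypothetical timeline:

    runtime->DirtyRoots();                        // GC begins: both flags set.
    runtime->VisitConcurrentRoots(visitor, arg);  // Visits both, clears flags.
    // ... a mutator interns a new string ...     // Intern table re-dirtied.
    runtime->VisitConcurrentRoots(visitor, arg);  // Re-visits only the intern
                                                  // table; class linker skipped.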
diff --git a/src/runtime.h b/src/runtime.h
index a6c662c..44823a0 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -224,9 +224,20 @@
     return "2.0.0";
   }
 
-  void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
+  // Force all the roots which can be marked concurrently to be dirty.
+  void DirtyRoots();
+
+  // Visit all the roots.
+  void VisitRoots(Heap::RootVisitor* visitor, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Visit all of the roots that we can safely visit concurrently.
+  void VisitConcurrentRoots(Heap::RootVisitor* visitor, void* arg);
+
+  // Visit all other roots, which must be visited with mutators suspended.
+  void VisitNonConcurrentRoots(Heap::RootVisitor* visitor, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   bool HasJniDlsymLookupStub() const {
     return jni_stub_array_ != NULL;
   }