Global lock levels.

Introduce the notion of the mutators/GC being a shared-exclusive (aka
reader-writer) lock. Introduce globally ordered locks that annotalysis
can analyse at compile time. Add locking attributes to
methods.

More subtly, remove the heap_lock_ and split it into various locks that
are held for shorter periods (so that work doesn't get blocked). Remove
buggy Dalvik-style thread transitions. Make GC use CMS in all cases when
concurrent is enabled. Fix bug where suspend counts rather than debug
suspend counts were sent to JDWP. Move the PathClassLoader to
WellKnownClasses. In debugger refactor calls to send request and
possibly suspend. Break apart different VmWait thread states. Move
identity hash code to a shared method.

Change-Id: Icdbfc3ce3fcccd14341860ac7305d8e97b51f5c6
diff --git a/src/stack.h b/src/stack.h
index fb0bc48..578c631 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -32,11 +32,9 @@
 class Object;
 class ShadowFrame;
 class StackIndirectReferenceTable;
-class ScopedJniThreadState;
+class ScopedObjectAccess;
 class Thread;
 
-jobject GetThreadStack(const ScopedJniThreadState&, Thread*);
-
 class ShadowFrame {
  public:
   // Number of references contained within this shadow frame
@@ -217,6 +215,7 @@
  protected:
   StackVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
                Context* context)
+      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_)
       : stack_start_(stack), trace_stack_(trace_stack), cur_shadow_frame_(NULL),
         cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
         context_(context) {}
@@ -225,9 +224,10 @@
   virtual ~StackVisitor() {}
 
   // Return 'true' if we should continue to visit more frames, 'false' to stop.
-  virtual bool VisitFrame() = 0;
+  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) = 0;
 
-  void WalkStack(bool include_transitions = false);
+  void WalkStack(bool include_transitions = false)
+      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
   Method* GetMethod() const {
     if (cur_shadow_frame_ != NULL) {
@@ -255,28 +255,30 @@
     return *reinterpret_cast<uintptr_t*>(save_addr);
   }
 
-  uint32_t GetDexPc() const;
+  uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
   // Returns the height of the stack in the managed stack frames, including transitions.
-  size_t GetFrameHeight() {
+  size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
     return GetNumFrames() - cur_depth_;
   }
 
   // Returns a frame ID for JDWP use, starting from 1.
-  size_t GetFrameId() {
+  size_t GetFrameId() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
     return GetFrameHeight() + 1;
   }
 
-  size_t GetNumFrames() {
+  size_t GetNumFrames() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
     if (num_frames_ == 0) {
       num_frames_ = ComputeNumFrames();
     }
     return num_frames_;
   }
 
-  uint32_t GetVReg(Method* m, int vreg) const;
+  uint32_t GetVReg(Method* m, int vreg) const
+      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
-  void SetVReg(Method* m, int vreg, uint32_t new_value);
+  void SetVReg(Method* m, int vreg, uint32_t new_value)
+      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
   uintptr_t GetGPR(uint32_t reg) const;
 
@@ -368,13 +370,13 @@
   }
 
  private:
-  size_t ComputeNumFrames() const;
+  size_t ComputeNumFrames() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
   TraceStackFrame GetTraceStackFrame(uint32_t depth) const {
     return trace_stack_->at(trace_stack_->size() - depth - 1);
   }
 
-  void SanityCheckFrame();
+  void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
 
   const ManagedStack* const stack_start_;
   const std::vector<TraceStackFrame>* const trace_stack_;