Merge "Register debugger for interesting instrumentation events only"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index be60ee2..e66c370 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -228,6 +228,15 @@
 size_t Dbg::full_deoptimization_event_count_ = 0;
 size_t Dbg::delayed_full_undeoptimization_count_ = 0;
 
+// Instrumentation event reference counters.
+size_t Dbg::dex_pc_change_event_ref_count_ = 0;
+size_t Dbg::method_enter_event_ref_count_ = 0;
+size_t Dbg::method_exit_event_ref_count_ = 0;
+size_t Dbg::field_read_event_ref_count_ = 0;
+size_t Dbg::field_write_event_ref_count_ = 0;
+size_t Dbg::exception_catch_event_ref_count_ = 0;
+uint32_t Dbg::instrumentation_events_ = 0;
+
 // Breakpoints.
 static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
 
@@ -641,14 +650,6 @@
   return gDisposed;
 }
 
-// All the instrumentation events the debugger is registered for.
-static constexpr uint32_t kListenerEvents = instrumentation::Instrumentation::kMethodEntered |
-                                            instrumentation::Instrumentation::kMethodExited |
-                                            instrumentation::Instrumentation::kDexPcMoved |
-                                            instrumentation::Instrumentation::kFieldRead |
-                                            instrumentation::Instrumentation::kFieldWritten |
-                                            instrumentation::Instrumentation::kExceptionCaught;
-
 void Dbg::GoActive() {
   // Enable all debugging features, including scans for breakpoints.
   // This is a no-op if we're already active.
@@ -668,6 +669,12 @@
     CHECK_EQ(deoptimization_requests_.size(), 0U);
     CHECK_EQ(full_deoptimization_event_count_, 0U);
     CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
+    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
+    CHECK_EQ(method_enter_event_ref_count_, 0U);
+    CHECK_EQ(method_exit_event_ref_count_, 0U);
+    CHECK_EQ(field_read_event_ref_count_, 0U);
+    CHECK_EQ(field_write_event_ref_count_, 0U);
+    CHECK_EQ(exception_catch_event_ref_count_, 0U);
   }
 
   Runtime* runtime = Runtime::Current();
@@ -676,7 +683,7 @@
   ThreadState old_state = self->SetStateUnsafe(kRunnable);
   CHECK_NE(old_state, kRunnable);
   runtime->GetInstrumentation()->EnableDeoptimization();
-  runtime->GetInstrumentation()->AddListener(&gDebugInstrumentationListener, kListenerEvents);
+  instrumentation_events_ = 0;
   gDebuggerActive = true;
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
   runtime->GetThreadList()->ResumeAll();
@@ -708,7 +715,11 @@
       full_deoptimization_event_count_ = 0U;
       delayed_full_undeoptimization_count_ = 0U;
     }
-    runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, kListenerEvents);
+    if (instrumentation_events_ != 0) {
+      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
+                                                    instrumentation_events_);
+      instrumentation_events_ = 0;
+    }
     runtime->GetInstrumentation()->DisableDeoptimization();
     gDebuggerActive = false;
   }
@@ -2663,6 +2674,25 @@
   }
 }
 
+size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
+  switch (instrumentation_event) {
+    case instrumentation::Instrumentation::kMethodEntered:
+      return &method_enter_event_ref_count_;
+    case instrumentation::Instrumentation::kMethodExited:
+      return &method_exit_event_ref_count_;
+    case instrumentation::Instrumentation::kDexPcMoved:
+      return &dex_pc_change_event_ref_count_;
+    case instrumentation::Instrumentation::kFieldRead:
+      return &field_read_event_ref_count_;
+    case instrumentation::Instrumentation::kFieldWritten:
+      return &field_write_event_ref_count_;
+    case instrumentation::Instrumentation::kExceptionCaught:
+      return &exception_catch_event_ref_count_;
+    default:
+      return nullptr;
+  }
+}
+
 // Process request while all mutator threads are suspended.
 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -2670,6 +2700,19 @@
     case DeoptimizationRequest::kNothing:
       LOG(WARNING) << "Ignoring empty deoptimization request.";
       break;
+    case DeoptimizationRequest::kRegisterForEvent:
+      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
+                                 request.instrumentation_event);
+      instrumentation->AddListener(&gDebugInstrumentationListener, request.instrumentation_event);
+      instrumentation_events_ |= request.instrumentation_event;
+      break;
+    case DeoptimizationRequest::kUnregisterForEvent:
+      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
+                                 request.instrumentation_event);
+      instrumentation->RemoveListener(&gDebugInstrumentationListener,
+                                      request.instrumentation_event);
+      instrumentation_events_ &= ~request.instrumentation_event;
+      break;
     case DeoptimizationRequest::kFullDeoptimization:
       VLOG(jdwp) << "Deoptimize the world ...";
       instrumentation->DeoptimizeEverything();
@@ -2728,6 +2771,32 @@
 
 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
   switch (req.kind) {
+    case DeoptimizationRequest::kRegisterForEvent: {
+      DCHECK_NE(req.instrumentation_event, 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
+                                                req.instrumentation_event);
+      if (*counter == 0) {
+        VLOG(jdwp) << StringPrintf("Queue request #%d to start listening to instrumentation event 0x%x",
+                                   deoptimization_requests_.size(), req.instrumentation_event);
+        deoptimization_requests_.push_back(req);
+      }
+      *counter = *counter + 1;
+      break;
+    }
+    case DeoptimizationRequest::kUnregisterForEvent: {
+      DCHECK_NE(req.instrumentation_event, 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
+                                                req.instrumentation_event);
+      *counter = *counter - 1;
+      if (*counter == 0) {
+        VLOG(jdwp) << StringPrintf("Queue request #%d to stop listening to instrumentation event 0x%x",
+                                   deoptimization_requests_.size(), req.instrumentation_event);
+        deoptimization_requests_.push_back(req);
+      }
+      break;
+    }
     case DeoptimizationRequest::kFullDeoptimization: {
       DCHECK(req.method == nullptr);
       if (full_deoptimization_event_count_ == 0) {
@@ -2740,7 +2809,6 @@
     }
     case DeoptimizationRequest::kFullUndeoptimization: {
       DCHECK(req.method == nullptr);
-      DCHECK_GT(full_deoptimization_event_count_, 0U);
       --full_deoptimization_event_count_;
       if (full_deoptimization_event_count_ == 0) {
         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 1ad1a00..31ffd6e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -129,21 +129,31 @@
   DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
 };
 
+// TODO rename to InstrumentationRequest.
 struct DeoptimizationRequest {
   enum Kind {
     kNothing,                   // no action.
+    kRegisterForEvent,          // start listening for instrumentation event.
+    kUnregisterForEvent,        // stop listening for instrumentation event.
     kFullDeoptimization,        // deoptimize everything.
     kFullUndeoptimization,      // undeoptimize everything.
     kSelectiveDeoptimization,   // deoptimize one method.
     kSelectiveUndeoptimization  // undeoptimize one method.
   };
 
-  DeoptimizationRequest() : kind(kNothing), method(nullptr) {}
+  DeoptimizationRequest() : kind(kNothing), instrumentation_event(0), method(nullptr) {}
 
   void VisitRoots(RootCallback* callback, void* arg);
 
   Kind kind;
 
+  // TODO we could use a union to hold the instrumentation_event and the method since they
+  // are only meaningful for kRegisterForEvent/kUnregisterForEvent and
+  // kSelectiveDeoptimization/kSelectiveUndeoptimization respectively.
+
+  // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
+  uint32_t instrumentation_event;
+
   // Method for selective deoptimization.
   mirror::ArtMethod* method;
 };
@@ -581,11 +591,13 @@
   static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
 
   // Guards deoptimization requests.
+  // TODO rename to instrumentation_update_lock.
   static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
 
   // Deoptimization requests to be processed each time the event list is updated. This is used when
   // registering and unregistering events so we do not deoptimize while holding the event list
   // lock.
+  // TODO rename to instrumentation_requests.
   static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
 
   // Count the number of events requiring full deoptimization. When the counter is > 0, everything
@@ -598,6 +610,19 @@
   // session.
   static size_t delayed_full_undeoptimization_count_ GUARDED_BY(deoptimization_lock_);
 
+  static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
+
+  // Instrumentation event reference counters.
+  // TODO we could use an array instead of having all these dedicated counters. Instrumentation
+  // events are bits of a mask so we could convert them to an array index.
+  static size_t dex_pc_change_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static size_t method_enter_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static size_t method_exit_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static size_t field_read_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static size_t field_write_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static size_t exception_catch_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+  static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);
+
   DISALLOW_COPY_AND_ASSIGN(Dbg);
 };
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3de0728..5630862 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -238,6 +238,7 @@
 
   bool IsActive() const {
     return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
+        have_field_read_listeners_ || have_field_write_listeners_ ||
         have_exception_caught_listeners_ || have_method_unwind_listeners_;
   }
 
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 4e2b0f8..cb2c420 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -136,6 +136,28 @@
     }
 }
 
+uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
+  switch (eventKind) {
+    case EK_BREAKPOINT:
+    case EK_SINGLE_STEP:
+      return instrumentation::Instrumentation::kDexPcMoved;
+    case EK_EXCEPTION:
+    case EK_EXCEPTION_CATCH:
+      return instrumentation::Instrumentation::kExceptionCaught;
+    case EK_METHOD_ENTRY:
+      return instrumentation::Instrumentation::kMethodEntered;
+    case EK_METHOD_EXIT:
+    case EK_METHOD_EXIT_WITH_RETURN_VALUE:
+      return instrumentation::Instrumentation::kMethodExited;
+    case EK_FIELD_ACCESS:
+      return instrumentation::Instrumentation::kFieldRead;
+    case EK_FIELD_MODIFICATION:
+      return instrumentation::Instrumentation::kFieldWritten;
+    default:
+      return 0;
+  }
+}
+
 /*
  * Add an event to the list.  Ordering is not important.
  *
@@ -148,30 +170,40 @@
   CHECK(pEvent->prev == NULL);
   CHECK(pEvent->next == NULL);
 
-  /*
-   * If one or more "break"-type mods are used, register them with
-   * the interpreter.
-   */
-  DeoptimizationRequest req;
-  for (int i = 0; i < pEvent->modCount; i++) {
-    const JdwpEventMod* pMod = &pEvent->mods[i];
-    if (pMod->modKind == MK_LOCATION_ONLY) {
-      /* should only be for Breakpoint, Step, and Exception */
-      Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
-    } else if (pMod->modKind == MK_STEP) {
-      /* should only be for EK_SINGLE_STEP; should only be one */
-      JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
-      JdwpStepDepth depth = static_cast<JdwpStepDepth>(pMod->step.depth);
-      JdwpError status = Dbg::ConfigureStep(pMod->step.threadId, size, depth);
-      if (status != ERR_NONE) {
-        return status;
+  {
+    /*
+     * If one or more "break"-type mods are used, register them with
+     * the interpreter.
+     */
+    DeoptimizationRequest req;
+    for (int i = 0; i < pEvent->modCount; i++) {
+      const JdwpEventMod* pMod = &pEvent->mods[i];
+      if (pMod->modKind == MK_LOCATION_ONLY) {
+        /* should only be for Breakpoint, Step, and Exception */
+        Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+      } else if (pMod->modKind == MK_STEP) {
+        /* should only be for EK_SINGLE_STEP; should only be one */
+        JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
+        JdwpStepDepth depth = static_cast<JdwpStepDepth>(pMod->step.depth);
+        JdwpError status = Dbg::ConfigureStep(pMod->step.threadId, size, depth);
+        if (status != ERR_NONE) {
+          return status;
+        }
       }
     }
+    if (NeedsFullDeoptimization(pEvent->eventKind)) {
+      CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+      CHECK(req.method == nullptr);
+      req.kind = DeoptimizationRequest::kFullDeoptimization;
+    }
+    Dbg::RequestDeoptimization(req);
   }
-  if (NeedsFullDeoptimization(pEvent->eventKind)) {
-    CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
-    CHECK(req.method == nullptr);
-    req.kind = DeoptimizationRequest::kFullDeoptimization;
+  uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
+  if (instrumentation_event != 0) {
+    DeoptimizationRequest req;
+    req.kind = DeoptimizationRequest::kRegisterForEvent;
+    req.instrumentation_event = instrumentation_event;
+    Dbg::RequestDeoptimization(req);
   }
 
   {
@@ -187,9 +219,6 @@
     ++event_list_size_;
   }
 
-  // TODO we can do better job here since we should process only one request: the one we just
-  // created.
-  Dbg::RequestDeoptimization(req);
   Dbg::ManageDeoptimization();
 
   return ERR_NONE;
@@ -219,40 +248,48 @@
   }
   pEvent->prev = NULL;
 
-  /*
-   * Unhook us from the interpreter, if necessary.
-   */
-  DeoptimizationRequest req;
-  for (int i = 0; i < pEvent->modCount; i++) {
-    JdwpEventMod* pMod = &pEvent->mods[i];
-    if (pMod->modKind == MK_LOCATION_ONLY) {
-      /* should only be for Breakpoint, Step, and Exception */
-      Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+  {
+    /*
+     * Unhook us from the interpreter, if necessary.
+     */
+    DeoptimizationRequest req;
+    for (int i = 0; i < pEvent->modCount; i++) {
+      JdwpEventMod* pMod = &pEvent->mods[i];
+      if (pMod->modKind == MK_LOCATION_ONLY) {
+        /* should only be for Breakpoint, Step, and Exception */
+        Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+      }
+      if (pMod->modKind == MK_STEP) {
+        /* should only be for EK_SINGLE_STEP; should only be one */
+        Dbg::UnconfigureStep(pMod->step.threadId);
+      }
     }
-    if (pMod->modKind == MK_STEP) {
-      /* should only be for EK_SINGLE_STEP; should only be one */
-      Dbg::UnconfigureStep(pMod->step.threadId);
+    if (pEvent->eventKind == EK_SINGLE_STEP) {
+      // Special case for single-steps where we want to avoid the slow deoptimize/undeoptimize
+      // cycle between each single-step. In an IDE, this would happen each time the user clicks
+      // the "single-step" button. Here we delay the full undeoptimization to the next resume
+      // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
+      // runtime shutdown).
+      // Therefore, in a single-stepping sequence, only the first single-step will trigger a full
+      // deoptimization and only the last single-step will trigger a full undeoptimization.
+      Dbg::DelayFullUndeoptimization();
+    } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
+      CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+      CHECK(req.method == nullptr);
+      req.kind = DeoptimizationRequest::kFullUndeoptimization;
     }
+    Dbg::RequestDeoptimization(req);
   }
-  if (pEvent->eventKind == EK_SINGLE_STEP) {
-    // Special case for single-steps where we want to avoid the slow pattern deoptimize/undeoptimize
-    // loop between each single-step. In a IDE, this would happens each time the user click on the
-    // "single-step" button. Here we delay the full undeoptimization to the next resume
-    // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
-    // runtime shutdown).
-    // Therefore, in a singles-stepping sequence, only the first single-step will trigger a full
-    // deoptimization and only the last single-step will trigger a full undeoptimization.
-    Dbg::DelayFullUndeoptimization();
-  } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
-    CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
-    CHECK(req.method == nullptr);
-    req.kind = DeoptimizationRequest::kFullUndeoptimization;
+  uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
+  if (instrumentation_event != 0) {
+    DeoptimizationRequest req;
+    req.kind = DeoptimizationRequest::kUnregisterForEvent;
+    req.instrumentation_event = instrumentation_event;
+    Dbg::RequestDeoptimization(req);
   }
 
   --event_list_size_;
   CHECK(event_list_size_ != 0 || event_list_ == NULL);
-
-  Dbg::RequestDeoptimization(req);
 }
 
 /*