summaryrefslogtreecommitdiffstats
path: root/runtime/jdwp
diff options
context:
space:
mode:
authorSebastien Hertz <shertz@google.com>2013-12-04 18:15:25 +0100
committerSebastien Hertz <shertz@google.com>2014-01-13 15:08:24 +0100
commit138dbfc3336e379d74d157086f69a0fbe830089b (patch)
treef5fa88466e6ab339d2a6faf9c9105bafcaee3ea7 /runtime/jdwp
parent62e45483db4aa1be096d3ce91903e01ef52fb901 (diff)
downloadart-138dbfc3336e379d74d157086f69a0fbe830089b.zip
art-138dbfc3336e379d74d157086f69a0fbe830089b.tar.gz
art-138dbfc3336e379d74d157086f69a0fbe830089b.tar.bz2
Selective deoptimization.
Update the instrumentation to allow selective deoptimization. Separate instrumentation listener registration from stubs configuration. A listener is now responsible for configuring the appropriate stubs. - The method tracing listener installs instrumentation entry/exit stubs or the interpreter depending on the accuracy of events we want (controlled by kDeoptimizeForAccurateMethodEntryExitListeners). - The debugger registers itself as an instrumentation listener but does not modify methods entrypoints. It only does this on demand when deoptimizing one method or all the methods. The selective deoptimization is used for breakpoint only. When a breakpoint is requested, the debugger deoptimizes this method by setting its entrypoint to the interpreter stub. As several breakpoints can be set on the same method, we deoptimize only once. When the last breakpoint on a method is removed, we reoptimize it by restoring the original entrypoints. The full deoptimization is used for method entry, method exit and single-step events. When one of these events is requested, we force eveything to run with the interpreter (except native and proxy methods). When the last of these events is removed, we restore all methods entrypoints except those which are currently deoptimized. Deoptimizing a method requires all mutator threads be suspended in order to walk each thread's stack and ensure no code is actually executing while we modify methods entrypoints. Suspending all the threads requires to not hold any lock. In the debugger, we deoptimize/undeoptimize when the JDWP event list changes (add or remove a breakpoint for instance). During the update, we need to hold the JDWP event list lock. This means we cannot suspend all the threads at this time. In order to deal with these constraints, we support a queue of deoptimization requests. When an event needs selective/full deoptimization/undeoptimization, we save its request in the queue. 
Once we release the JDWP event list lock, we suspend all the threads, process this queue and finally resume all the threads. This is done in Dbg::ManageDeoptimization. Note: threads already suspended before doing this remain suspended so we don't "break" debugger suspensions. When we deoptimize one method or every method, we need to browse each thread's stack to install instrumentation exit PC as return PC and save information in the instrumentation stack frame. Now that we can deoptimize multiple times during the execution of an application, we need to preserve existing instrumentation frames (which is the result of a previous deoptimization). This requires pushing new instrumentation frames before existing ones so we don't corrupt the instrumentation stack frame while walking the stack. Bug: 11538162 Change-Id: I477142df17edf2dab8ac5d879daacc5c08a67c39
Diffstat (limited to 'runtime/jdwp')
-rw-r--r--runtime/jdwp/jdwp.h6
-rw-r--r--runtime/jdwp/jdwp_event.cc98
-rw-r--r--runtime/jdwp/jdwp_main.cc1
3 files changed, 79 insertions, 26 deletions
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index fd78bf2..ebc844e 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -328,9 +328,11 @@ struct JdwpState {
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- Mutex event_list_lock_;
+ Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
- int event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
+ size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
+ size_t full_deoptimization_requests_ GUARDED_BY(event_list_lock_); // Number of events requiring
+ // full deoptimization.
// Used to synchronize suspension of the event thread (to avoid receiving "resume"
// events before the thread has finished suspending itself).
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index b05b49d..4aa7f13 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -135,6 +135,18 @@ static void dumpEvent(const JdwpEvent* pEvent) {
}
}
+static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
+ switch (eventKind) {
+ case EK_METHOD_ENTRY:
+ case EK_METHOD_EXIT:
+ case EK_METHOD_EXIT_WITH_RETURN_VALUE:
+ case EK_SINGLE_STEP:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Add an event to the list. Ordering is not important.
*
@@ -170,16 +182,31 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
}
}
- /*
- * Add to list.
- */
- MutexLock mu(Thread::Current(), event_list_lock_);
- if (event_list_ != NULL) {
- pEvent->next = event_list_;
- event_list_->prev = pEvent;
+ {
+ /*
+ * Add to list.
+ */
+ MutexLock mu(Thread::Current(), event_list_lock_);
+ if (event_list_ != NULL) {
+ pEvent->next = event_list_;
+ event_list_->prev = pEvent;
+ }
+ event_list_ = pEvent;
+ ++event_list_size_;
+
+ /**
+ * Do we need to enable full deoptimization ?
+ */
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ if (full_deoptimization_requests_ == 0) {
+ // This is the first event that needs full deoptimization: enable it.
+ Dbg::EnableFullDeoptimization();
+ }
+ ++full_deoptimization_requests_;
+ }
}
- event_list_ = pEvent;
- ++event_list_size_;
+
+ Dbg::ManageDeoptimization();
return ERR_NONE;
}
@@ -225,6 +252,17 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
--event_list_size_;
CHECK(event_list_size_ != 0 || event_list_ == NULL);
+
+ /**
+ * Can we disable full deoptimization ?
+ */
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ --full_deoptimization_requests_;
+ if (full_deoptimization_requests_ == 0) {
+ // We no longer need full deoptimization.
+ Dbg::DisableFullDeoptimization();
+ }
+ }
}
/*
@@ -235,20 +273,25 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
* explicitly remove one-off single-step events.)
*/
void JdwpState::UnregisterEventById(uint32_t requestId) {
- MutexLock mu(Thread::Current(), event_list_lock_);
+ bool found = false;
+ {
+ MutexLock mu(Thread::Current(), event_list_lock_);
- JdwpEvent* pEvent = event_list_;
- while (pEvent != NULL) {
- if (pEvent->requestId == requestId) {
- UnregisterEvent(pEvent);
- EventFree(pEvent);
- return; /* there can be only one with a given ID */
+ for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
+ if (pEvent->requestId == requestId) {
+ found = true;
+ UnregisterEvent(pEvent);
+ EventFree(pEvent);
+ break; /* there can be only one with a given ID */
+ }
}
-
- pEvent = pEvent->next;
}
- // ALOGD("Odd: no match when removing event reqId=0x%04x", requestId);
+ if (found) {
+ Dbg::ManageDeoptimization();
+ } else {
+ LOG(DEBUG) << StringPrintf("Odd: no match when removing event reqId=0x%04x", requestId);
+ }
}
/*
@@ -692,6 +735,8 @@ bool JdwpState::PostVMStart() {
expandBufAdd8BE(pReq, threadId);
}
+ Dbg::ManageDeoptimization();
+
/* send request and possibly suspend ourselves */
SendRequestAndPossiblySuspend(pReq, suspend_policy, threadId);
@@ -753,14 +798,12 @@ bool JdwpState::PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, in
return false;
}
- JdwpEvent** match_list = NULL;
int match_count = 0;
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
-
{
MutexLock mu(Thread::Current(), event_list_lock_);
- match_list = AllocMatchList(event_list_size_);
+ JdwpEvent** match_list = AllocMatchList(event_list_size_);
if ((eventFlags & Dbg::kBreakpoint) != 0) {
FindMatchingEvents(EK_BREAKPOINT, &basket, match_list, &match_count);
}
@@ -800,6 +843,8 @@ bool JdwpState::PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, in
CleanupMatchList(match_list, match_count);
}
+ Dbg::ManageDeoptimization();
+
SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
return match_count != 0;
}
@@ -859,6 +904,8 @@ bool JdwpState::PostThreadChange(ObjectId threadId, bool start) {
CleanupMatchList(match_list, match_count);
}
+ Dbg::ManageDeoptimization();
+
SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
return match_count != 0;
@@ -912,13 +959,12 @@ bool JdwpState::PostException(const JdwpLocation* pThrowLoc,
return false;
}
- JdwpEvent** match_list = NULL;
int match_count = 0;
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
{
MutexLock mu(Thread::Current(), event_list_lock_);
- match_list = AllocMatchList(event_list_size_);
+ JdwpEvent** match_list = AllocMatchList(event_list_size_);
FindMatchingEvents(EK_EXCEPTION, &basket, match_list, &match_count);
if (match_count != 0) {
VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total)"
@@ -954,6 +1000,8 @@ bool JdwpState::PostException(const JdwpLocation* pThrowLoc,
CleanupMatchList(match_list, match_count);
}
+ Dbg::ManageDeoptimization();
+
SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
return match_count != 0;
@@ -1024,6 +1072,8 @@ bool JdwpState::PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std
CleanupMatchList(match_list, match_count);
}
+ Dbg::ManageDeoptimization();
+
SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
return match_count != 0;
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 93deee5..127ebfa 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -214,6 +214,7 @@ JdwpState::JdwpState(const JdwpOptions* options)
event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(NULL),
event_list_size_(0),
+ full_deoptimization_requests_(0),
event_thread_lock_("JDWP event thread lock"),
event_thread_cond_("JDWP event thread condition variable", event_thread_lock_),
event_thread_id_(0),