author     Andreas Gampe <agampe@google.com>   2014-08-16 13:41:10 -0700
committer  Andreas Gampe <agampe@google.com>   2014-08-18 09:40:40 -0700
commit     956a5228276693a7317ae6b41bfe7a7f0f3cbe6b (patch)
tree       4fb359d422d2d7af04f1b9dc9b8b5d6f268b138b
parent     078f4ea927b3259d698ca3dd542a096943e2cccd (diff)
ART: Do not recursively abort when visiting locks in a bad state
This avoids a nested abort in VisitLocks.

Bug: 17080621, 16382675

(cherry picked from commit 760172c3ccd6e75f6f1a89d8006934e8ffb1303e)

Change-Id: Id604976ac9dcac0e319fb25cab4d2cbc98d7ee24
-rw-r--r--  runtime/monitor.cc  13
-rw-r--r--  runtime/monitor.h    4
-rw-r--r--  runtime/thread.cc    3
3 files changed, 16 insertions, 4 deletions
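
The change follows a simple pattern: while the runtime is already aborting and dumping stacks, a helper that would normally CHECK-fail on an inconsistent stack must instead return a sentinel so the dump can continue. The following standalone sketch illustrates that pattern; it is not ART code, and the names GetDexPcOrSentinel, VisitLocksSketch, and kNoIndex are made up for illustration, only loosely mirroring StackVisitor::GetDexPc, Monitor::VisitLocks, and DexFile::kDexNoIndex.

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Sentinel value, analogous in spirit to DexFile::kDexNoIndex in ART.
constexpr uint32_t kNoIndex = 0xFFFFFFFF;

// Hypothetical stand-in for StackVisitor::GetDexPc(abort_on_failure).
// Returns a valid pc when the stack is consistent; otherwise it either
// aborts (old behavior) or returns the sentinel (new behavior).
uint32_t GetDexPcOrSentinel(bool stack_consistent, bool abort_on_failure) {
  if (stack_consistent) {
    return 42;  // Pretend this is the current dex pc.
  }
  if (abort_on_failure) {
    std::fprintf(stderr, "Inconsistent stack\n");
    std::abort();  // If we are already aborting, this re-enters the abort path.
  }
  return kNoIndex;
}

// Hypothetical stand-in for Monitor::VisitLocks.
void VisitLocksSketch(bool stack_consistent, bool abort_on_failure) {
  uint32_t dex_pc = GetDexPcOrSentinel(stack_consistent, abort_on_failure);
  if (!abort_on_failure && dex_pc == kNoIndex) {
    std::fprintf(stderr, "Could not find dex pc; skipping lock dump\n");
    return;  // Bail out instead of aborting a second time.
  }
  std::printf("Visiting locks at dex pc %" PRIu32 "\n", dex_pc);
}

int main() {
  VisitLocksSketch(/*stack_consistent=*/true,  /*abort_on_failure=*/true);   // Normal path.
  VisitLocksSketch(/*stack_consistent=*/false, /*abort_on_failure=*/false);  // Dump-during-abort path.
  return 0;
}

In the actual change below, abort_on_failure defaults to true, so existing callers keep the aborting behavior; only the stack dumper in thread.cc opts out.
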
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 433c1b2..7118af1 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -982,7 +982,7 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
}
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context) {
+ void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
CHECK(m != NULL);
@@ -1015,10 +1015,19 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
return; // No "tries" implies no synchronization, so no held locks to report.
}
+ // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort when it cannot find
+ // the dex pc; instead it returns kDexNoIndex. In that case, bail out, since it indicates the
+ // stack is inconsistent anyway.
+ uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
+ if (!abort_on_failure && dex_pc == DexFile::kDexNoIndex) {
+ LOG(ERROR) << "Could not find dex_pc for " << PrettyMethod(m);
+ return;
+ }
+
// Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
// the locks held in this stack frame.
std::vector<uint32_t> monitor_enter_dex_pcs;
- verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), &monitor_enter_dex_pcs);
+ verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
if (monitor_enter_dex_pcs.empty()) {
return;
}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 26d43c9..e8321eb 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -88,8 +88,10 @@ class Monitor {
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
+ // The abort_on_failure flag makes it possible not to abort when the runtime state is
+ // inconsistent. This is necessary when we have already aborted but want to dump as much of
+ // the stack as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context)
+ void* callback_context, bool abort_on_failure = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f06d081..c7cd57d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -906,7 +906,8 @@ struct StackDumpVisitor : public StackVisitor {
Monitor::DescribeWait(os, thread);
}
if (can_allocate) {
- Monitor::VisitLocks(this, DumpLockedObject, &os);
+ // Visit locks, but do not abort on errors. This would trigger a nested abort.
+ Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}