summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--runtime/runtime.cc7
-rw-r--r--runtime/thread_list.cc35
-rw-r--r--runtime/trace.cc30
-rw-r--r--runtime/trace.h5
4 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1cbf841..44cd2ed 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -153,13 +153,6 @@ Runtime::Runtime()
}
Runtime::~Runtime() {
- if (method_trace_ && Thread::Current() == nullptr) {
- // We need a current thread to shutdown method tracing: re-attach it now.
- JNIEnv* unused_env;
- if (GetJavaVM()->AttachCurrentThread(&unused_env, nullptr) != JNI_OK) {
- LOG(ERROR) << "Could not attach current thread before runtime shutdown.";
- }
- }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ff1a079..7cf26e6 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -294,16 +294,19 @@ size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
void ThreadList::SuspendAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " SuspendAll starting...";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll starting...";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll starting...";
+ }
ATRACE_BEGIN("Suspending mutator threads");
uint64_t start_time = NanoTime();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && self != nullptr) {
CHECK_NE(self->GetState(), kRunnable);
}
{
@@ -344,14 +347,21 @@ void ThreadList::SuspendAll() {
ATRACE_END();
ATRACE_BEGIN("Mutator threads suspended");
- VLOG(threads) << *self << " SuspendAll complete";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll complete";
+ }
}
void ThreadList::ResumeAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " ResumeAll starting";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll starting";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll starting";
+ }
ATRACE_END();
ATRACE_BEGIN("Resuming mutator threads");
@@ -377,11 +387,20 @@ void ThreadList::ResumeAll() {
// Broadcast a notification to all suspended threads, some or all of
// which may choose to wake up. No need to wait for them.
- VLOG(threads) << *self << " ResumeAll waking others";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll waking others";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll waking others";
+ }
Thread::resume_cond_->Broadcast(self);
}
ATRACE_END();
- VLOG(threads) << *self << " ResumeAll complete";
+
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll complete";
+ }
}
void ThreadList::Resume(Thread* thread, bool for_debugger) {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f51b8c4..ca5e150 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -178,29 +178,29 @@ bool Trace::UseWallClock() {
(clock_source_ == kTraceClockSourceDual);
}
-static void MeasureClockOverhead(Trace* trace) {
- if (trace->UseThreadCpuClock()) {
+void Trace::MeasureClockOverhead() {
+ if (UseThreadCpuClock()) {
Thread::Current()->GetCpuMicroTime();
}
- if (trace->UseWallClock()) {
+ if (UseWallClock()) {
MicroTime();
}
}
// Compute an average time taken to measure clocks.
-static uint32_t GetClockOverheadNanoSeconds(Trace* trace) {
+uint32_t Trace::GetClockOverheadNanoSeconds() {
Thread* self = Thread::Current();
uint64_t start = self->GetCpuMicroTime();
for (int i = 4000; i > 0; i--) {
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
}
uint64_t elapsed_us = self->GetCpuMicroTime() - start;
@@ -444,7 +444,8 @@ TracingMode Trace::GetMethodTracingMode() {
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
: trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
- buffer_size_(buffer_size), start_time_(MicroTime()), cur_offset_(0), overflow_(false) {
+ buffer_size_(buffer_size), start_time_(MicroTime()),
+ clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
// Set up the beginning of the trace.
uint16_t trace_version = GetTraceVersion(clock_source_);
memset(buf_.get(), 0, kTraceHeaderLength);
@@ -480,7 +481,6 @@ void Trace::FinishTracing() {
uint64_t elapsed = MicroTime() - start_time_;
size_t final_offset = cur_offset_.LoadRelaxed();
- uint32_t clock_overhead_ns = GetClockOverheadNanoSeconds(this);
if ((flags_ & kTraceCountAllocs) != 0) {
Runtime::Current()->SetStatsEnabled(false);
@@ -506,7 +506,7 @@ void Trace::FinishTracing() {
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
os << StringPrintf("num-method-calls=%zd\n", num_records);
- os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns);
+ os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
if ((flags_ & kTraceCountAllocs) != 0) {
os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
diff --git a/runtime/trace.h b/runtime/trace.h
index d7836b8..45a02da 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -68,6 +68,8 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
bool UseWallClock();
bool UseThreadCpuClock();
+ void MeasureClockOverhead();
+ uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -155,6 +157,9 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
// Time trace was created.
const uint64_t start_time_;
+ // Clock overhead.
+ const uint32_t clock_overhead_ns_;
+
// Offset into buf_.
AtomicInteger cur_offset_;