-rw-r--r--  runtime/gc/collector/semi_space.cc      |  30
-rw-r--r--  runtime/gc/heap.cc                      | 103
-rw-r--r--  runtime/gc/heap.h                       |  13
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc  |   8
-rw-r--r--  runtime/gc/space/bump_pointer_space.h   |   7
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc      |   5
-rw-r--r--  runtime/gc/space/dlmalloc_space.h       |   1
-rw-r--r--  runtime/gc/space/rosalloc_space.cc      |   5
-rw-r--r--  runtime/gc/space/rosalloc_space.h       |   1
-rw-r--r--  runtime/gc/space/space.h                |   3
-rw-r--r--  runtime/gc/space/zygote_space.cc        |   4
-rw-r--r--  runtime/gc/space/zygote_space.h         |   3
-rw-r--r--  runtime/runtime.cc                      |   7
-rw-r--r--  runtime/runtime.h                       |  11
-rw-r--r--  runtime/runtime_android.cc              |  50
-rw-r--r--  runtime/runtime_linux.cc                |  10
-rw-r--r--  runtime/thread.h                        |   7
17 files changed, 241 insertions, 27 deletions
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a4c9dea..4668a19 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -61,7 +61,8 @@ namespace gc {
namespace collector {
static constexpr bool kProtectFromSpace = true;
-static constexpr bool kResetFromSpace = true;
+static constexpr bool kClearFromSpace = true;
+static constexpr bool kStoreStackTraces = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -169,6 +170,19 @@ void SemiSpace::ProcessReferences(Thread* self) {
}
void SemiSpace::MarkingPhase() {
+ if (kStoreStackTraces) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self_);
+ // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // related crash later.
+ ThreadState old_state = self_->SetStateUnsafe(kRunnable);
+ std::ostringstream oss;
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->DumpForSigQuit(oss);
+ runtime->GetThreadList()->DumpNativeStacks(oss);
+ runtime->SetFaultMessage(oss.str());
+ CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
+ }
+
if (generational_) {
if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
clear_soft_references_) {
@@ -353,19 +367,17 @@ void SemiSpace::ReclaimPhase() {
TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
GetHeap()->UnBindBitmaps();
}
- // Release the memory used by the from space.
- if (kResetFromSpace) {
- // Clearing from space.
+ if (kClearFromSpace) {
+ // Release the memory used by the from space.
from_space_->Clear();
}
+ from_space_->Reset();
// Protect the from space.
- VLOG(heap)
- << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
- << reinterpret_cast<void*>(from_space_->Limit());
+ VLOG(heap) << "Protecting space " << *from_space_;
if (kProtectFromSpace) {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+ from_space_->GetMemMap()->Protect(PROT_NONE);
} else {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+ from_space_->GetMemMap()->Protect(PROT_READ);
}
if (saved_bytes_ > 0) {
VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e6d2c2..9ad21cf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -318,6 +318,91 @@ void Heap::ChangeAllocator(AllocatorType allocator) {
}
}
+std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
+ if (!IsValidContinuousSpaceObjectAddress(klass)) {
+ return StringPrintf("<non heap address klass %p>", klass);
+ }
+ mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
+ if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
+ std::string result("[");
+ result += SafeGetClassDescriptor(component_type);
+ return result;
+ } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
+ return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
+ } else if (UNLIKELY(klass->IsProxyClass())) {
+ return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
+ } else {
+ mirror::DexCache* dex_cache = klass->GetDexCache();
+ if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
+ return StringPrintf("<non heap address dex_cache %p>", dex_cache);
+ }
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint16_t class_def_idx = klass->GetDexClassDefIndex();
+ if (class_def_idx == DexFile::kDexNoIndex16) {
+ return "<class def not found>";
+ }
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
+ return dex_file->GetTypeDescriptor(type_id);
+ }
+}
+
+std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return "null";
+ }
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ if (klass == nullptr) {
+ return "(class=null)";
+ }
+ std::string result(SafeGetClassDescriptor(klass));
+ if (obj->IsClass()) {
+ result += "<" + SafeGetClassDescriptor(obj->AsClass()) + ">";
+ }
+ return result;
+}
+
+void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
+ if (obj == nullptr) {
+ stream << "(obj=null)";
+ return;
+ }
+ if (IsAligned<kObjectAlignment>(obj)) {
+ space::Space* space = nullptr;
+ // Don't use FindContinuousSpaceFromObject since it only finds spaces which actually contain
+ // objects, not spaces which may contain objects (e.g. cleared bump pointer spaces).
+ for (const auto& cur_space : continuous_spaces_) {
+ if (cur_space->HasAddress(obj)) {
+ space = cur_space;
+ break;
+ }
+ }
+ if (space == nullptr) {
+ if (allocator_mem_map_.get() == nullptr || !allocator_mem_map_->HasAddress(obj)) {
+ stream << "obj " << obj << " not a valid heap address";
+ return;
+ } else if (allocator_mem_map_.get() != nullptr) {
+ allocator_mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+ // Unprotect all the spaces.
+ for (const auto& space : continuous_spaces_) {
+ mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+ }
+ stream << "Object " << obj;
+ if (space != nullptr) {
+ stream << " in space " << *space;
+ }
+ mirror::Class* klass = obj->GetClass();
+ stream << "\nclass=" << klass;
+ if (klass != nullptr) {
+ stream << " type= " << SafePrettyTypeOf(obj);
+ }
+ // Re-protect the address we faulted on.
+ mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
+ }
+}
+
bool Heap::IsCompilingBoot() const {
for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace() || space->IsZygoteSpace()) {
@@ -809,16 +894,23 @@ bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
if (obj == nullptr) {
return true;
}
- return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+ return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
}
bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
return FindContinuousSpaceFromObject(obj, true) != nullptr;
}
-bool Heap::IsHeapAddress(const mirror::Object* obj) const {
- // TODO: This might not work for large objects.
- return FindSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
+ if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+ return false;
+ }
+ for (const auto& space : continuous_spaces_) {
+ if (space->HasAddress(obj)) {
+ return true;
+ }
+ }
+ return false;
}
bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
@@ -1539,6 +1631,7 @@ void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
void Heap::SwapSemiSpaces() {
// Swap the spaces so we allocate into the space which we just evacuated.
std::swap(bump_pointer_space_, temp_space_);
+ bump_pointer_space_->Clear();
}
void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -1616,7 +1709,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
CHECK(temp_space_->IsEmpty());
semi_space_collector_->SetFromSpace(bump_pointer_space_);
semi_space_collector_->SetToSpace(temp_space_);
- mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+ temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
collector = semi_space_collector_;
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
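SafeGetClassDescriptor() above rebuilds a JVM-style type descriptor while trusting the object graph as little as possible: arrays prepend '[' and recurse into the component type, primitives map to single-letter descriptors, proxies go through the class linker, and everything else is resolved from the dex file. The toy program below only illustrates the descriptor format itself; ToyType and Descriptor() are made-up names, not the ART implementation.

// Illustration of JVM-style descriptors: "I" for int, "[I" for int[],
// "Ljava/lang/String;" for String, "[[Ljava/lang/String;" for String[][].
#include <iostream>
#include <string>

struct ToyType {
  std::string non_array_descriptor;  // e.g. "I" or "Ljava/lang/String;"
  int array_dimensions;
};

std::string Descriptor(const ToyType& type) {
  std::string result;
  // One '[' per array dimension, matching the "[" + component-descriptor
  // recursion in SafeGetClassDescriptor().
  for (int i = 0; i < type.array_dimensions; ++i) {
    result += "[";
  }
  result += type.non_array_descriptor;
  return result;
}

int main() {
  std::cout << Descriptor({"I", 0}) << "\n";                   // I
  std::cout << Descriptor({"I", 1}) << "\n";                   // [I
  std::cout << Descriptor({"Ljava/lang/String;", 2}) << "\n";  // [[Ljava/lang/String;
  return 0;
}

The point of the "Safe" variants is that DumpObject() can print something useful from a signal handler even when the class pointer is garbage: every dereference is gated by IsValidContinuousSpaceObjectAddress(), so a bad pointer degrades to a "<non heap address ...>" string instead of a second fault.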
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2f227d0..b194d8d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -223,9 +223,6 @@ class Heap {
bool IsValidObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns true if the address passed in is a heap address, doesn't need to be aligned.
- bool IsHeapAddress(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
@@ -519,6 +516,12 @@ class Heap {
void DumpSpaces(std::ostream& stream = LOG(INFO));
+ // DumpObject should only be used by the signal handler.
+ void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ // Safe versions of PrettyTypeOf and GetClassDescriptor which check that objects are valid
+ // heap addresses before dereferencing them.
+ std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
+ std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
@@ -600,6 +603,10 @@ class Heap {
template <bool kGrow>
bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+ // Returns true if the address passed in is within the address range of a continuous space.
+ bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Pushes a list of cleared references out to the managed heap.
void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 43674ea..fcd3b70 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -61,6 +61,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
void BumpPointerSpace::Clear() {
// Release the pages back to the operating system.
CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+}
+
+void BumpPointerSpace::Reset() {
// Reset the end of the space back to the beginning, we move the end forward as we allocate
// objects.
SetEnd(Begin());
@@ -75,8 +78,9 @@ void BumpPointerSpace::Clear() {
}
void BumpPointerSpace::Dump(std::ostream& os) const {
- os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
- << reinterpret_cast<void*>(Limit());
+ os << GetName() << " "
+ << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+ << reinterpret_cast<void*>(Limit());
}
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 476b833..2c9d35f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -92,8 +92,11 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return nullptr;
}
- // Clear the memory and reset the pointer to the start of the space.
- void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+ // Madvise the memory back to the OS.
+ void Clear() OVERRIDE;
+
+ // Reset the pointer to the start of the space.
+ void Reset() OVERRIDE LOCKS_EXCLUDED(block_lock_);
void Dump(std::ostream& os) const;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index caedaaf..b591486 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -281,12 +281,15 @@ uint64_t DlMallocSpace::GetObjectsAllocated() {
}
void DlMallocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void DlMallocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
lock_.AssertHeld(Thread::Current());
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ea10ad..4bf16ce 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -113,6 +113,7 @@ class DlMallocSpace : public MallocSpace {
uint64_t GetObjectsAllocated() OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
bool IsDlMallocSpace() const OVERRIDE {
return true;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index fe8421d..fb621ea 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -304,12 +304,15 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
}
void RosAllocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void RosAllocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index bd32196..5bc425d 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -80,6 +80,7 @@ class RosAllocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
byte* begin, byte* end, byte* limit, size_t growth_limit);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 0f8f38a..37d7c80 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -399,6 +399,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Free all memory associated with this space.
virtual void Clear() = 0;
+ // Reset the space back to an empty state.
+ virtual void Reset() = 0;
+
accounting::SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a60ab38..d1c3d03 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -61,6 +61,10 @@ void ZygoteSpace::Clear() {
LOG(FATAL) << "Unimplemented";
}
+void ZygoteSpace::Reset() {
+ LOG(FATAL) << "Unimplemented";
+}
+
ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyFullCollect),
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 8cd1a9f..8880548 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -71,7 +71,8 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_;
}
- void Clear();
+ void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
protected:
virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1ef15f7..90ba7d3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -91,6 +91,8 @@ Runtime::Runtime()
resolution_method_(nullptr),
imt_conflict_method_(nullptr),
default_imt_(nullptr),
+ fault_message_lock_("Fault message lock"),
+ fault_message_(""),
method_verifiers_lock_("Method verifiers lock"),
threads_being_born_(0),
shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
@@ -1598,4 +1600,9 @@ void Runtime::RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) con
DCHECK(IsActiveTransaction());
preinitialization_transaction->RecordWeakStringRemoval(s, hash_code);
}
+
+void Runtime::SetFaultMessage(const std::string& message) {
+ MutexLock mu(Thread::Current(), fault_message_lock_);
+ fault_message_ = message;
+}
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8924921..249bb45 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -446,6 +446,13 @@ class Runtime {
void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SetFaultMessage(const std::string& message);
+ // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
+ // with the unexpected_signal_lock_.
+ const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
+ return fault_message_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -520,6 +527,10 @@ class Runtime {
mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ // Fault message, printed when we get a SIGSEGV.
+ Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::string fault_message_ GUARDED_BY(fault_message_lock_);
+
// Method verifier set, used so that we can update their GC roots.
Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::set<verifier::MethodVerifier*> method_verifiers_;
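The fault message added above is written by the GC under fault_message_lock_, but GetFaultMessage() is read from the fatal-signal path without taking any lock (hence NO_THREAD_SAFETY_ANALYSIS): acquiring a mutex inside a SIGSEGV handler risks deadlock and lock-order violations with unexpected_signal_lock_. Below is a minimal sketch of that writer-locks / crash-reader-doesn't pattern, using made-up ToyRuntime names and std::mutex rather than ART's Mutex.

// Sketch only: writers serialize on a mutex, the crash-time reader does not.
#include <iostream>
#include <mutex>
#include <string>

class ToyRuntime {
 public:
  void SetFaultMessage(const std::string& message) {
    std::lock_guard<std::mutex> lock(fault_message_lock_);
    fault_message_ = message;
  }

  // Intended only for the fatal-signal path: no lock, on purpose.
  const std::string& GetFaultMessageUnsafe() const { return fault_message_; }

 private:
  std::mutex fault_message_lock_;
  std::string fault_message_;
};

int main() {
  ToyRuntime runtime;
  runtime.SetFaultMessage("thread dump captured before semi-space compaction");
  std::cout << "Fault message: " << runtime.GetFaultMessageUnsafe() << "\n";
  return 0;
}

A torn read of the string is tolerable here because the process is already crashing; a deadlock inside the handler would lose the report entirely.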
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 2013294..14e5574 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,12 +14,58 @@
* limitations under the License.
*/
-#include "runtime.h"
+#include <signal.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <inttypes.h>
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "base/stringprintf.h"
+#include "thread-inl.h"
+#include "utils.h"
namespace art {
+struct sigaction old_action;
+void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
+ static bool handlingUnexpectedSignal = false;
+ if (handlingUnexpectedSignal) {
+ LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
+ LogMessage::LogLine(data, "HandleUnexpectedSignal reentered\n");
+ _exit(1);
+ }
+ handlingUnexpectedSignal = true;
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ // Print this out first in case DumpObject faults.
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ gc::Heap* heap = runtime->GetHeap();
+ if (heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
+ // Run the old signal handler.
+ old_action.sa_sigaction(signal_number, info, raw_context);
+}
+
void Runtime::InitPlatformSignalHandlers() {
- // On a device, debuggerd will give us a stack trace. Nothing to do here.
+ // On a device, debuggerd will still dump a stack for us; install our own handler first so we
+ // can log the fault message and the heap object at the fault address, then chain to it.
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = HandleUnexpectedSignal;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+ int rc = 0;
+ rc += sigaction(SIGSEGV, &action, &old_action);
+ CHECK_EQ(rc, 0);
}
} // namespace art
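The handler above saves the previously installed sigaction and chains to it after logging, so debuggerd (whose handler is typically installed by the platform before the runtime starts) still produces its tombstone. Below is a standalone sketch of the same chaining pattern; it is an illustration, not the ART handler, and it only calls write(2) inside the handler since that call is async-signal-safe.

// Sketch of a chaining SIGSEGV handler: log, then hand off to the previous
// handler (or re-raise with the default action) so the process still dies.
#include <signal.h>
#include <string.h>
#include <unistd.h>

static struct sigaction g_old_action;

static void HandleSegv(int signal_number, siginfo_t* info, void* raw_context) {
  const char msg[] = "SIGSEGV caught, dumping state before chaining\n";
  ssize_t rc = write(STDERR_FILENO, msg, sizeof(msg) - 1);  // Async-signal-safe.
  (void)rc;
  if (g_old_action.sa_flags & SA_SIGINFO) {
    g_old_action.sa_sigaction(signal_number, info, raw_context);
  } else if (g_old_action.sa_handler == SIG_DFL || g_old_action.sa_handler == SIG_IGN) {
    signal(signal_number, SIG_DFL);  // Fall back to the default action...
    raise(signal_number);            // ...so the process still terminates.
  } else {
    g_old_action.sa_handler(signal_number);
  }
}

int main() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_sigaction = HandleSegv;
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;  // Three-arg handler, alternate stack.
  sigaction(SIGSEGV, &action, &g_old_action);

  volatile int* bad = nullptr;
  *bad = 42;  // Deliberate fault to exercise the handler (illustration only).
  return 0;
}

SA_SIGINFO selects the three-argument handler so si_addr is available (this is the address DumpObject is fed), and SA_ONSTACK runs the handler on the alternate signal stack so stack-overflow faults can still be reported.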
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 73ac034..4a166d7 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -305,7 +305,15 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
<< "Thread: " << tid << " \"" << thread_name << "\"\n"
<< "Registers:\n" << Dumpable<UContext>(thread_context) << "\n"
<< "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace);
-
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ gc::Heap* heap = runtime->GetHeap();
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ if (heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
diff --git a/runtime/thread.h b/runtime/thread.h
index f9d31af..fcae9e4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,12 @@
namespace art {
+namespace gc {
+namespace collector {
+class SemiSpace;
+} // namespace collector
+} // namespace gc
+
namespace mirror {
class ArtMethod;
class Array;
@@ -851,6 +857,7 @@ class PACKED(4) Thread {
private:
friend class Dbg; // For SetStateUnsafe.
+ friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Monitor;
friend class MonitorInfo;
friend class Runtime; // For CreatePeer.