| author | Christopher Ferris <cferris@google.com> | 2015-07-11 22:47:06 -0700 |
|---|---|---|
| committer | Christopher Ferris <cferris@google.com> | 2015-07-13 12:17:50 -0700 |
| commit | 9fee99b06013787054a312449b94115038e2ad7c (patch) | |
| tree | ae11f880964b8cf750dc46c5c71eba133de81923 /libc/bionic | |
| parent | 65dd858239c4e32a5a1afbc14ac30dbcdd2a50a3 (diff) | |
Do not hold hash table lock while backtracing.
There is a deadlock if the hash table lock is held while doing a
backtrace. Change the code so that the hash table lock is held only
while actually modifying g_hash_table or an entry in g_hash_table.
Bug: 22423683
Change-Id: I604ea56f940f22e99da41ea4dcdf97bedaac268d
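
To illustrate the locking pattern this change adopts, here is a minimal, self-contained sketch (not the bionic code): the backtrace is collected before the lock is taken, and the lock is held only while the shared table is updated. The `ScopedLock` class is modeled on bionic's `ScopedPthreadMutexLocker` RAII helper; glibc's `backtrace()` and a `std::map` stand in for `GET_BACKTRACE` and `g_hash_table`, and `record_allocation()` is a hypothetical helper.

```cpp
// Minimal sketch of "collect the backtrace outside the lock, lock only for
// the table update". Illustrative only; not the actual bionic implementation.
#include <execinfo.h>   // glibc backtrace(), standing in for GET_BACKTRACE
#include <pthread.h>
#include <cstdio>
#include <map>
#include <vector>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
// Stand-in for g_hash_table: maps a backtrace to its allocation count.
static std::map<std::vector<void*>, size_t> g_counts;

// RAII locker modeled on ScopedPthreadMutexLocker: lock in the constructor,
// unlock in the destructor, so the critical section is exactly the scope of
// the local variable.
class ScopedLock {
 public:
  explicit ScopedLock(pthread_mutex_t* mu) : mu_(mu) { pthread_mutex_lock(mu_); }
  ~ScopedLock() { pthread_mutex_unlock(mu_); }
 private:
  pthread_mutex_t* mu_;
};

static void record_allocation() {
  // Unwind the stack *before* taking the lock. Holding the hash table lock
  // across the backtrace is what the commit identifies as deadlock-prone.
  void* frames[32];
  int depth = backtrace(frames, 32);

  // Hold the lock only while the shared table is actually modified.
  ScopedLock locker(&g_lock);
  g_counts[std::vector<void*>(frames, frames + depth)]++;
}

int main() {
  record_allocation();
  std::printf("%zu distinct backtraces recorded\n", g_counts.size());
  return 0;
}
```

The diff below follows the same scoping: leak_malloc now calls GET_BACKTRACE before record_backtrace constructs the ScopedPthreadMutexLocker, and leak_free validates the header's guard before taking the lock.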
Diffstat (limited to 'libc/bionic')
-rw-r--r-- | libc/bionic/malloc_debug_leak.cpp | 8 |
1 file changed, 3 insertions, 5 deletions
```diff
diff --git a/libc/bionic/malloc_debug_leak.cpp b/libc/bionic/malloc_debug_leak.cpp
index 64f2112..6a46667 100644
--- a/libc/bionic/malloc_debug_leak.cpp
+++ b/libc/bionic/malloc_debug_leak.cpp
@@ -133,8 +133,9 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
         size |= SIZE_FLAG_ZYGOTE_CHILD;
     }
 
+    // Keep the lock held for as little time as possible to prevent deadlocks.
+    ScopedPthreadMutexLocker locker(&g_hash_table->lock);
     HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
-
     if (entry != NULL) {
         entry->allocations++;
     } else {
@@ -302,8 +303,6 @@ extern "C" void* leak_malloc(size_t bytes) {
 
     void* base = g_malloc_dispatch->malloc(size);
     if (base != NULL) {
-        ScopedPthreadMutexLocker locker(&g_hash_table->lock);
-
         uintptr_t backtrace[BACKTRACE_SIZE];
         size_t numEntries = GET_BACKTRACE(backtrace, BACKTRACE_SIZE);
 
@@ -328,8 +327,6 @@ extern "C" void leak_free(void* mem) {
         return;
     }
 
-    ScopedPthreadMutexLocker locker(&g_hash_table->lock);
-
     // check the guard to make sure it is valid
     AllocationEntry* header = to_header(mem);
 
@@ -342,6 +339,7 @@ extern "C" void leak_free(void* mem) {
         }
     }
 
+    ScopedPthreadMutexLocker locker(&g_hash_table->lock);
     if (header->guard == GUARD || is_valid_entry(header->entry)) {
         // decrement the allocations
         HashEntry* entry = header->entry;
```