Diffstat (limited to 'libc/bionic')
-rw-r--r-- | libc/bionic/debug_stacktrace.cpp    | 26
-rw-r--r-- | libc/bionic/libc_init_common.cpp    |  2
-rw-r--r-- | libc/bionic/libc_logging.cpp        |  4
-rw-r--r-- | libc/bionic/locale.cpp              | 66
-rw-r--r-- | libc/bionic/malloc_debug_check.cpp  | 24
-rw-r--r-- | libc/bionic/malloc_debug_common.cpp | 80
-rw-r--r-- | libc/bionic/malloc_debug_leak.cpp   | 22
-rw-r--r-- | libc/bionic/pthread_accessor.h      |  6
-rw-r--r-- | libc/bionic/pthread_atfork.cpp      | 29
-rw-r--r-- | libc/bionic/pthread_create.cpp      |  8
-rw-r--r-- | libc/bionic/pthread_debug.cpp       | 66
-rw-r--r-- | libc/bionic/pthread_exit.cpp        |  4
-rw-r--r-- | libc/bionic/pthread_internal.h      |  4
-rw-r--r-- | libc/bionic/pthread_internals.cpp   | 12
-rw-r--r-- | libc/bionic/pthread_key.cpp         |  6
-rw-r--r-- | libc/bionic/thread_atexit.cpp       |  6
16 files changed, 183 insertions(+), 182 deletions(-)
diff --git a/libc/bionic/debug_stacktrace.cpp b/libc/bionic/debug_stacktrace.cpp
index 4207a3f..713e761 100644
--- a/libc/bionic/debug_stacktrace.cpp
+++ b/libc/bionic/debug_stacktrace.cpp
@@ -50,30 +50,30 @@ typedef struct _Unwind_Context __unwind_context;
 typedef _Unwind_Context __unwind_context;
 #endif
 
-static mapinfo_t* gMapInfo = NULL;
-static void* gDemangler;
+static mapinfo_t* g_map_info = NULL;
+static void* g_demangler;
 typedef char* (*DemanglerFn)(const char*, char*, size_t*, int*);
-static DemanglerFn gDemanglerFn = NULL;
+static DemanglerFn g_demangler_fn = NULL;
 
 __LIBC_HIDDEN__ void backtrace_startup() {
-  gMapInfo = mapinfo_create(getpid());
-  gDemangler = dlopen("libgccdemangle.so", RTLD_NOW);
-  if (gDemangler != NULL) {
-    void* sym = dlsym(gDemangler, "__cxa_demangle");
-    gDemanglerFn = reinterpret_cast<DemanglerFn>(sym);
+  g_map_info = mapinfo_create(getpid());
+  g_demangler = dlopen("libgccdemangle.so", RTLD_NOW);
+  if (g_demangler != NULL) {
+    void* sym = dlsym(g_demangler, "__cxa_demangle");
+    g_demangler_fn = reinterpret_cast<DemanglerFn>(sym);
   }
 }
 
 __LIBC_HIDDEN__ void backtrace_shutdown() {
-  mapinfo_destroy(gMapInfo);
-  dlclose(gDemangler);
+  mapinfo_destroy(g_map_info);
+  dlclose(g_demangler);
 }
 
 static char* demangle(const char* symbol) {
-  if (gDemanglerFn == NULL) {
+  if (g_demangler_fn == NULL) {
     return NULL;
   }
-  return (*gDemanglerFn)(symbol, NULL, NULL, NULL);
+  return (*g_demangler_fn)(symbol, NULL, NULL, NULL);
 }
 
 struct stack_crawl_state_t {
@@ -147,7 +147,7 @@ __LIBC_HIDDEN__ void log_backtrace(uintptr_t* frames, size_t frame_count) {
     }
 
     uintptr_t rel_pc;
-    const mapinfo_t* mi = (gMapInfo != NULL) ? mapinfo_find(gMapInfo, frames[i], &rel_pc) : NULL;
+    const mapinfo_t* mi = (g_map_info != NULL) ? mapinfo_find(g_map_info, frames[i], &rel_pc) : NULL;
     const char* soname = (mi != NULL) ? mi->name : info.dli_fname;
     if (soname == NULL) {
       soname = "<unknown>";
diff --git a/libc/bionic/libc_init_common.cpp b/libc/bionic/libc_init_common.cpp
index 1cfaf50..abf2d36 100644
--- a/libc/bionic/libc_init_common.cpp
+++ b/libc/bionic/libc_init_common.cpp
@@ -77,7 +77,7 @@ static size_t get_main_thread_stack_size() {
  * apply to linker-private copies and will not be visible from libc later on.
  *
  * Note: this function creates a pthread_internal_t for the initial thread and
- * stores the pointer in TLS, but does not add it to pthread's gThreadList. This
+ * stores the pointer in TLS, but does not add it to pthread's thread list. This
 * has to be done later from libc itself (see __libc_init_common).
 *
 * This function also stores a pointer to the kernel argument block in a TLS slot to be
diff --git a/libc/bionic/libc_logging.cpp b/libc/bionic/libc_logging.cpp
index 79472b3..8966a5f 100644
--- a/libc/bionic/libc_logging.cpp
+++ b/libc/bionic/libc_logging.cpp
@@ -45,7 +45,7 @@
 #include <time.h>
 #include <unistd.h>
 
-static pthread_mutex_t gAbortMsgLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_abort_msg_lock = PTHREAD_MUTEX_INITIALIZER;
 
 __LIBC_HIDDEN__ abort_msg_t** __abort_message_ptr; // Accessible to __libc_init_common.
 
@@ -643,7 +643,7 @@ void __libc_fatal(const char* format, ...) {
 }
 
 void __android_set_abort_message(const char* msg) {
-  ScopedPthreadMutexLocker locker(&gAbortMsgLock);
+  ScopedPthreadMutexLocker locker(&g_abort_msg_lock);
 
   if (__abort_message_ptr == NULL) {
     // We must have crashed _very_ early.
diff --git a/libc/bionic/locale.cpp b/libc/bionic/locale.cpp
index 3752fa4..5a1da43 100644
--- a/libc/bionic/locale.cpp
+++ b/libc/bionic/locale.cpp
@@ -36,43 +36,43 @@ struct __locale_t {
   // Because we only support one locale, these are just tokens with no data.
 };
 
-static pthread_once_t gLocaleOnce = PTHREAD_ONCE_INIT;
-static lconv gLocale;
+static pthread_once_t g_locale_once = PTHREAD_ONCE_INIT;
+static lconv g_locale;
 
 // We don't use pthread_once for this so that we know when the resource (a TLS slot) will be taken.
-static pthread_key_t gUselocaleKey;
+static pthread_key_t g_uselocale_key;
 __attribute__((constructor)) static void __bionic_tls_uselocale_key_init() {
-  pthread_key_create(&gUselocaleKey, NULL);
+  pthread_key_create(&g_uselocale_key, NULL);
 }
 
 static void __locale_init() {
-  gLocale.decimal_point = const_cast<char*>(".");
+  g_locale.decimal_point = const_cast<char*>(".");
 
   char* not_available = const_cast<char*>("");
-  gLocale.thousands_sep = not_available;
-  gLocale.grouping = not_available;
-  gLocale.int_curr_symbol = not_available;
-  gLocale.currency_symbol = not_available;
-  gLocale.mon_decimal_point = not_available;
-  gLocale.mon_thousands_sep = not_available;
-  gLocale.mon_grouping = not_available;
-  gLocale.positive_sign = not_available;
-  gLocale.negative_sign = not_available;
-
-  gLocale.int_frac_digits = CHAR_MAX;
-  gLocale.frac_digits = CHAR_MAX;
-  gLocale.p_cs_precedes = CHAR_MAX;
-  gLocale.p_sep_by_space = CHAR_MAX;
-  gLocale.n_cs_precedes = CHAR_MAX;
-  gLocale.n_sep_by_space = CHAR_MAX;
-  gLocale.p_sign_posn = CHAR_MAX;
-  gLocale.n_sign_posn = CHAR_MAX;
-  gLocale.int_p_cs_precedes = CHAR_MAX;
-  gLocale.int_p_sep_by_space = CHAR_MAX;
-  gLocale.int_n_cs_precedes = CHAR_MAX;
-  gLocale.int_n_sep_by_space = CHAR_MAX;
-  gLocale.int_p_sign_posn = CHAR_MAX;
-  gLocale.int_n_sign_posn = CHAR_MAX;
+  g_locale.thousands_sep = not_available;
+  g_locale.grouping = not_available;
+  g_locale.int_curr_symbol = not_available;
+  g_locale.currency_symbol = not_available;
+  g_locale.mon_decimal_point = not_available;
+  g_locale.mon_thousands_sep = not_available;
+  g_locale.mon_grouping = not_available;
+  g_locale.positive_sign = not_available;
+  g_locale.negative_sign = not_available;
+
+  g_locale.int_frac_digits = CHAR_MAX;
+  g_locale.frac_digits = CHAR_MAX;
+  g_locale.p_cs_precedes = CHAR_MAX;
+  g_locale.p_sep_by_space = CHAR_MAX;
+  g_locale.n_cs_precedes = CHAR_MAX;
+  g_locale.n_sep_by_space = CHAR_MAX;
+  g_locale.p_sign_posn = CHAR_MAX;
+  g_locale.n_sign_posn = CHAR_MAX;
+  g_locale.int_p_cs_precedes = CHAR_MAX;
+  g_locale.int_p_sep_by_space = CHAR_MAX;
+  g_locale.int_n_cs_precedes = CHAR_MAX;
+  g_locale.int_n_sep_by_space = CHAR_MAX;
+  g_locale.int_p_sign_posn = CHAR_MAX;
+  g_locale.int_n_sign_posn = CHAR_MAX;
 }
 
 static bool __bionic_current_locale_is_utf8 = false;
@@ -88,8 +88,8 @@ static locale_t __new_locale() {
 }
 
 lconv* localeconv() {
-  pthread_once(&gLocaleOnce, __locale_init);
-  return &gLocale;
+  pthread_once(&g_locale_once, __locale_init);
+  return &g_locale;
 }
 
 locale_t duplocale(locale_t l) {
@@ -140,7 +140,7 @@ char* setlocale(int category, const char* locale_name) {
 }
 
 locale_t uselocale(locale_t new_locale) {
-  locale_t old_locale = static_cast<locale_t>(pthread_getspecific(gUselocaleKey));
+  locale_t old_locale = static_cast<locale_t>(pthread_getspecific(g_uselocale_key));
 
   // If this is the first call to uselocale(3) on this thread, we return LC_GLOBAL_LOCALE.
   if (old_locale == NULL) {
@@ -148,7 +148,7 @@ locale_t uselocale(locale_t new_locale) {
   }
 
   if (new_locale != NULL) {
-    pthread_setspecific(gUselocaleKey, new_locale);
+    pthread_setspecific(g_uselocale_key, new_locale);
   }
 
   return old_locale;
diff --git a/libc/bionic/malloc_debug_check.cpp b/libc/bionic/malloc_debug_check.cpp
index 7dd8e3c..11578a3 100644
--- a/libc/bionic/malloc_debug_check.cpp
+++ b/libc/bionic/malloc_debug_check.cpp
@@ -53,8 +53,8 @@
 #include "private/ScopedPthreadMutexLocker.h"
 
 /* libc.debug.malloc.backlog */
-extern unsigned int gMallocDebugBacklog;
-extern int gMallocDebugLevel;
+extern unsigned int g_malloc_debug_backlog;
+extern int g_malloc_debug_level;
 
 #define MAX_BACKTRACE_DEPTH 16
 #define ALLOCATION_TAG 0x1ee7d00d
@@ -108,8 +108,10 @@ static inline const hdr_t* const_meta(const void* user) {
   return reinterpret_cast<const hdr_t*>(user) - 1;
 }
 
-
-static unsigned gAllocatedBlockCount;
+// TODO: introduce a struct for this global state.
+// There are basically two lists here, the regular list and the backlog list.
+// We should be able to remove the duplication.
+static unsigned g_allocated_block_count;
 static hdr_t* tail;
 static hdr_t* head;
 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
@@ -188,7 +190,7 @@ static inline void add(hdr_t* hdr, size_t size) {
   hdr->size = size;
   init_front_guard(hdr);
   init_rear_guard(hdr);
-  ++gAllocatedBlockCount;
+  ++g_allocated_block_count;
   add_locked(hdr, &tail, &head);
 }
 
@@ -199,7 +201,7 @@ static inline int del(hdr_t* hdr) {
 
   ScopedPthreadMutexLocker locker(&lock);
   del_locked(hdr, &tail, &head);
-  --gAllocatedBlockCount;
+  --g_allocated_block_count;
   return 0;
 }
 
@@ -306,7 +308,7 @@ static inline void del_from_backlog(hdr_t* hdr) {
 
 static inline int del_leak(hdr_t* hdr, int* safe) {
   ScopedPthreadMutexLocker locker(&lock);
-  return del_and_check_locked(hdr, &tail, &head, &gAllocatedBlockCount, safe);
+  return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe);
 }
 
 static inline void add_to_backlog(hdr_t* hdr) {
@@ -316,7 +318,7 @@ static inline void add_to_backlog(hdr_t* hdr) {
   add_locked(hdr, &backlog_tail, &backlog_head);
   poison(hdr);
   /* If we've exceeded the maximum backlog, clear it up */
-  while (backlog_num > gMallocDebugBacklog) {
+  while (backlog_num > g_malloc_debug_backlog) {
     hdr_t* gone = backlog_tail;
     del_from_backlog_locked(gone);
     dlfree(gone->base);
@@ -508,7 +510,7 @@ extern "C" size_t chk_malloc_usable_size(const void* ptr) {
 
 static void ReportMemoryLeaks() {
   // We only track leaks at level 10.
-  if (gMallocDebugLevel != 10) {
+  if (g_malloc_debug_level != 10) {
     return;
   }
 
@@ -522,13 +524,13 @@ static void ReportMemoryLeaks() {
     exe[count] = '\0';
   }
 
-  if (gAllocatedBlockCount == 0) {
+  if (g_allocated_block_count == 0) {
     log_message("+++ %s did not leak", exe);
     return;
   }
 
   size_t index = 1;
-  const size_t total = gAllocatedBlockCount;
+  const size_t total = g_allocated_block_count;
   while (head != NULL) {
     int safe;
     hdr_t* block = head;
diff --git a/libc/bionic/malloc_debug_common.cpp b/libc/bionic/malloc_debug_common.cpp
index 4fa4b6e..356ecb1 100644
--- a/libc/bionic/malloc_debug_common.cpp
+++ b/libc/bionic/malloc_debug_common.cpp
@@ -54,8 +54,8 @@
 */
 int gMallocLeakZygoteChild = 0;
 
-pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
-HashTable gHashTable;
+pthread_mutex_t g_allocations_mutex = PTHREAD_MUTEX_INITIALIZER;
+HashTable g_hash_table;
 
 // =============================================================================
 // output functions
@@ -122,9 +122,9 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
   }
   *totalMemory = 0;
 
-  ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+  ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
-  if (gHashTable.count == 0) {
+  if (g_hash_table.count == 0) {
     *info = NULL;
     *overallSize = 0;
     *infoSize = 0;
@@ -132,12 +132,12 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     return;
   }
 
-  HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * gHashTable.count));
+  HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * g_hash_table.count));
 
   // get the entries into an array to be sorted
   int index = 0;
   for (size_t i = 0 ; i < HASHTABLE_SIZE ; ++i) {
-    HashEntry* entry = gHashTable.slots[i];
+    HashEntry* entry = g_hash_table.slots[i];
     while (entry != NULL) {
       list[index] = entry;
       *totalMemory = *totalMemory +
@@ -149,7 +149,7 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
 
   // XXX: the protocol doesn't allow variable size for the stack trace (yet)
   *infoSize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * BACKTRACE_SIZE);
-  *overallSize = *infoSize * gHashTable.count;
+  *overallSize = *infoSize * g_hash_table.count;
   *backtraceSize = BACKTRACE_SIZE;
 
   // now get a byte array big enough for this
@@ -161,10 +161,10 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     return;
   }
 
-  qsort(list, gHashTable.count, sizeof(void*), hash_entry_compare);
+  qsort(list, g_hash_table.count, sizeof(void*), hash_entry_compare);
 
  uint8_t* head = *info;
-  const int count = gHashTable.count;
+  const int count = g_hash_table.count;
  for (int i = 0 ; i < count ; ++i) {
    HashEntry* entry = list[i];
    size_t entrySize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * entry->numEntries);
@@ -253,7 +253,7 @@ extern "C" size_t malloc_usable_size(const void* mem) {
 #include "private/libc_logging.h"
 
 /* Table for dispatching malloc calls, depending on environment. */
-static MallocDebug gMallocUse __attribute__((aligned(32))) = {
+static MallocDebug g_malloc_dispatch_table __attribute__((aligned(32))) = {
   dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign, dlmalloc_usable_size
 };
 
@@ -286,11 +286,11 @@ static void* libc_malloc_impl_handle = NULL;
 * backlog we use to detect multiple frees. If the property is not set, the
 * backlog length defaults to BACKLOG_DEFAULT_LEN.
 */
-unsigned int gMallocDebugBacklog;
+unsigned int g_malloc_debug_backlog;
 #define BACKLOG_DEFAULT_LEN 100
 
 /* The value of libc.debug.malloc.
 */
-int gMallocDebugLevel;
+int g_malloc_debug_level;
 
 template<typename FunctionType>
 static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, const char* prefix, const char* suffix) {
@@ -304,7 +304,7 @@ static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, co
 
 static void InitMalloc(void* malloc_impl_handler, MallocDebug* table, const char* prefix) {
   __libc_format_log(ANDROID_LOG_INFO, "libc", "%s: using libc.debug.malloc %d (%s)\n",
-                    __progname, gMallocDebugLevel, prefix);
+                    __progname, g_malloc_debug_level, prefix);
 
   InitMallocFunction<MallocDebugMalloc>(malloc_impl_handler, &table->malloc, prefix, "malloc");
   InitMallocFunction<MallocDebugFree>(malloc_impl_handler, &table->free, prefix, "free");
@@ -332,7 +332,7 @@ static void malloc_init_impl() {
   if (__system_property_get("ro.kernel.memcheck", memcheck_tracing)) {
     if (memcheck_tracing[0] != '0') {
       // Emulator has started with memory tracing enabled. Enforce it.
-      gMallocDebugLevel = 20;
+      g_malloc_debug_level = 20;
       memcheck_enabled = 1;
     }
   }
@@ -340,13 +340,13 @@ static void malloc_init_impl() {
 
   /* If debug level has not been set by memcheck option in the emulator,
   * lets grab it from libc.debug.malloc system property. */
-  if (gMallocDebugLevel == 0 && __system_property_get("libc.debug.malloc", env)) {
-    gMallocDebugLevel = atoi(env);
+  if (g_malloc_debug_level == 0 && __system_property_get("libc.debug.malloc", env)) {
+    g_malloc_debug_level = atoi(env);
   }
 
   /* Debug level 0 means that we should use dlxxx allocation
   * routines (default). */
-  if (gMallocDebugLevel == 0) {
+  if (g_malloc_debug_level == 0) {
     return;
   }
 
@@ -360,24 +360,24 @@ static void malloc_init_impl() {
   }
 
   // mksh is way too leaky. http://b/7291287.
-  if (gMallocDebugLevel >= 10) {
+  if (g_malloc_debug_level >= 10) {
     if (strcmp(__progname, "sh") == 0 || strcmp(__progname, "/system/bin/sh") == 0) {
       return;
     }
   }
 
   // Choose the appropriate .so for the requested debug level.
-  switch (gMallocDebugLevel) {
+  switch (g_malloc_debug_level) {
     case 1:
     case 5:
    case 10: {
      char debug_backlog[PROP_VALUE_MAX];
      if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
-        gMallocDebugBacklog = atoi(debug_backlog);
-        info_log("%s: setting backlog length to %d\n", __progname, gMallocDebugBacklog);
+        g_malloc_debug_backlog = atoi(debug_backlog);
+        info_log("%s: setting backlog length to %d\n", __progname, g_malloc_debug_backlog);
      }
-      if (gMallocDebugBacklog == 0) {
-        gMallocDebugBacklog = BACKLOG_DEFAULT_LEN;
+      if (g_malloc_debug_backlog == 0) {
+        g_malloc_debug_backlog = BACKLOG_DEFAULT_LEN;
      }
      so_name = "libc_malloc_debug_leak.so";
      break;
@@ -386,7 +386,7 @@
      // Quick check: debug level 20 can only be handled in emulator.
      if (!qemu_running) {
        error_log("%s: Debug level %d can only be set in emulator\n",
-                  __progname, gMallocDebugLevel);
+                  __progname, g_malloc_debug_level);
        return;
      }
      // Make sure that memory checking has been enabled in emulator.
@@ -398,7 +398,7 @@ static void malloc_init_impl() {
      so_name = "libc_malloc_debug_qemu.so";
      break;
    default:
-      error_log("%s: Debug level %d is unknown\n", __progname, gMallocDebugLevel);
+      error_log("%s: Debug level %d is unknown\n", __progname, g_malloc_debug_level);
      return;
  }
 
@@ -406,7 +406,7 @@ static void malloc_init_impl() {
  void* malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
  if (malloc_impl_handle == NULL) {
    error_log("%s: Missing module %s required for malloc debug level %d: %s",
-              __progname, so_name, gMallocDebugLevel, dlerror());
+              __progname, so_name, g_malloc_debug_level, dlerror());
    return;
  }
 
@@ -424,7 +424,7 @@ static void malloc_init_impl() {
    return;
  }
 
-  if (gMallocDebugLevel == 20) {
+  if (g_malloc_debug_level == 20) {
    // For memory checker we need to do extra initialization.
    typedef int (*MemCheckInit)(int, const char*);
    MemCheckInit memcheck_initialize =
@@ -445,35 +445,35 @@ static void malloc_init_impl() {
  }
 
  // Initialize malloc dispatch table with appropriate routines.
-  switch (gMallocDebugLevel) {
+  switch (g_malloc_debug_level) {
    case 1:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "leak");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "leak");
      break;
    case 5:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "fill");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "fill");
      break;
    case 10:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "chk");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "chk");
      break;
    case 20:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "qemu_instrumented");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "qemu_instrumented");
      break;
    default:
      break;
  }
 
  // Make sure dispatch table is initialized
-  if ((gMallocUse.malloc == NULL) ||
-      (gMallocUse.free == NULL) ||
-      (gMallocUse.calloc == NULL) ||
-      (gMallocUse.realloc == NULL) ||
-      (gMallocUse.memalign == NULL) ||
-      (gMallocUse.malloc_usable_size == NULL)) {
+  if ((g_malloc_dispatch_table.malloc == NULL) ||
+      (g_malloc_dispatch_table.free == NULL) ||
+      (g_malloc_dispatch_table.calloc == NULL) ||
+      (g_malloc_dispatch_table.realloc == NULL) ||
+      (g_malloc_dispatch_table.memalign == NULL) ||
+      (g_malloc_dispatch_table.malloc_usable_size == NULL)) {
    error_log("%s: some symbols for libc.debug.malloc level %d were not found (see above)",
-              __progname, gMallocDebugLevel);
+              __progname, g_malloc_debug_level);
    dlclose(malloc_impl_handle);
  } else {
-    __libc_malloc_dispatch = &gMallocUse;
+    __libc_malloc_dispatch = &g_malloc_dispatch_table;
    libc_malloc_impl_handle = malloc_impl_handle;
  }
 }
diff --git a/libc/bionic/malloc_debug_leak.cpp b/libc/bionic/malloc_debug_leak.cpp
index 3397def..146cddc 100644
--- a/libc/bionic/malloc_debug_leak.cpp
+++ b/libc/bionic/malloc_debug_leak.cpp
@@ -61,8 +61,8 @@
 
 // Global variables defined in malloc_debug_common.c
 extern int gMallocLeakZygoteChild;
-extern pthread_mutex_t gAllocationsMutex;
-extern HashTable gHashTable;
+extern pthread_mutex_t g_allocations_mutex;
+extern HashTable g_hash_table;
 
 // =============================================================================
 // stack trace functions
@@ -138,7 +138,7 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
        size |= SIZE_FLAG_ZYGOTE_CHILD;
    }
 
-    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
+    HashEntry* entry = find_entry(&g_hash_table, slot, backtrace, numEntries, size);
 
    if (entry != NULL) {
        entry->allocations++;
@@ -151,20 +151,20 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
-        entry->next = gHashTable.slots[slot];
+        entry->next = g_hash_table.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;
 
        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
 
-        gHashTable.slots[slot] = entry;
+        g_hash_table.slots[slot] = entry;
 
        if (entry->next != NULL) {
            entry->next->prev = entry;
        }
 
        // we just added an entry, increase the size of the hashtable
-        gHashTable.count++;
+        g_hash_table.count++;
    }
 
    return entry;
@@ -174,7 +174,7 @@ static int is_valid_entry(HashEntry* entry) {
    if (entry != NULL) {
        int i;
        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
-            HashEntry* e1 = gHashTable.slots[i];
+            HashEntry* e1 = g_hash_table.slots[i];
 
            while (e1 != NULL) {
                if (e1 == entry) {
@@ -198,11 +198,11 @@ static void remove_entry(HashEntry* entry) {
 
    if (prev == NULL) {
        // we are the head of the list. set the head to be next
-        gHashTable.slots[entry->slot] = entry->next;
+        g_hash_table.slots[entry->slot] = entry->next;
    }
 
    // we just removed and entry, decrease the size of the hashtable
-    gHashTable.count--;
+    g_hash_table.count--;
 }
 
 // =============================================================================
@@ -277,7 +277,7 @@ extern "C" void* leak_malloc(size_t bytes) {
 
    void* base = dlmalloc(size);
    if (base != NULL) {
-        ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+        ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
        uintptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
@@ -296,7 +296,7 @@ extern "C" void* leak_malloc(size_t bytes) {
 
 extern "C" void leak_free(void* mem) {
    if (mem != NULL) {
-        ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+        ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
        // check the guard to make sure it is valid
        AllocationEntry* header = to_header(mem);
diff --git a/libc/bionic/pthread_accessor.h b/libc/bionic/pthread_accessor.h
index ccb71bb..df4a5a2 100644
--- a/libc/bionic/pthread_accessor.h
+++ b/libc/bionic/pthread_accessor.h
@@ -26,7 +26,7 @@ class pthread_accessor {
 public:
  explicit pthread_accessor(pthread_t desired_thread) {
    Lock();
-    for (thread_ = gThreadList; thread_ != NULL; thread_ = thread_->next) {
+    for (thread_ = g_thread_list; thread_ != NULL; thread_ = thread_->next) {
      if (thread_ == reinterpret_cast<pthread_internal_t*>(desired_thread)) {
        break;
      }
@@ -41,7 +41,7 @@ class pthread_accessor {
    if (is_locked_) {
      is_locked_ = false;
      thread_ = NULL;
-      pthread_mutex_unlock(&gThreadListLock);
+      pthread_mutex_unlock(&g_thread_list_lock);
    }
  }
 
@@ -54,7 +54,7 @@ class pthread_accessor {
  bool is_locked_;
 
  void Lock() {
-    pthread_mutex_lock(&gThreadListLock);
+    pthread_mutex_lock(&g_thread_list_lock);
    is_locked_ = true;
  }
 
diff --git a/libc/bionic/pthread_atfork.cpp b/libc/bionic/pthread_atfork.cpp
index c0664a9..b845f7d 100644
--- a/libc/bionic/pthread_atfork.cpp
+++ b/libc/bionic/pthread_atfork.cpp
@@ -29,8 +29,6 @@
 #include <errno.h>
 #include <pthread.h>
 
-static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
-
 struct atfork_t {
  atfork_t* next;
  atfork_t* prev;
@@ -45,7 +43,8 @@ struct atfork_list_t {
  atfork_t* last;
 };
 
-static atfork_list_t gAtForkList = { NULL, NULL };
+static pthread_mutex_t g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+static atfork_list_t g_atfork_list = { NULL, NULL };
 
 void __bionic_atfork_run_prepare() {
  // We lock the atfork list here, unlock it in the parent, and reset it in the child.
@@ -54,12 +53,12 @@ void __bionic_atfork_run_prepare() {
  //
  // TODO: If a handler tries to mutate the list, they'll block. We should probably copy
  // the list before forking, and have prepare, parent, and child all work on the consistent copy.
-  pthread_mutex_lock(&gAtForkListMutex);
+  pthread_mutex_lock(&g_atfork_list_mutex);
 
  // Call pthread_atfork() prepare handlers. POSIX states that the prepare
  // handlers should be called in the reverse order of the parent/child
  // handlers, so we iterate backwards.
-  for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) {
+  for (atfork_t* it = g_atfork_list.last; it != NULL; it = it->prev) {
    if (it->prepare != NULL) {
      it->prepare();
    }
@@ -67,23 +66,23 @@ void __bionic_atfork_run_prepare() {
 }
 
 void __bionic_atfork_run_child() {
-  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
    if (it->child != NULL) {
      it->child();
    }
  }
 
-  gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+  g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
 }
 
 void __bionic_atfork_run_parent() {
-  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
    if (it->parent != NULL) {
      it->parent();
    }
  }
 
-  pthread_mutex_unlock(&gAtForkListMutex);
+  pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
@@ -96,20 +95,20 @@ int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(voi
  entry->parent = parent;
  entry->child = child;
 
-  pthread_mutex_lock(&gAtForkListMutex);
+  pthread_mutex_lock(&g_atfork_list_mutex);
 
  // Append 'entry' to the list.
  entry->next = NULL;
-  entry->prev = gAtForkList.last;
+  entry->prev = g_atfork_list.last;
  if (entry->prev != NULL) {
    entry->prev->next = entry;
  }
-  if (gAtForkList.first == NULL) {
-    gAtForkList.first = entry;
+  if (g_atfork_list.first == NULL) {
+    g_atfork_list.first = entry;
  }
-  gAtForkList.last = entry;
+  g_atfork_list.last = entry;
 
-  pthread_mutex_unlock(&gAtForkListMutex);
+  pthread_mutex_unlock(&g_atfork_list_mutex);
 
  return 0;
 }
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index f62dc15..2afb651 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -52,9 +52,9 @@ extern "C" __attribute__((noinline)) void _thread_created_hook(pid_t) {}
 extern "C" __LIBC_HIDDEN__ void __init_user_desc(struct user_desc*, int, void*);
 #endif
 
-static pthread_mutex_t gPthreadStackCreationLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_pthread_stack_creation_lock = PTHREAD_MUTEX_INITIALIZER;
 
-static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_debugger_notification_lock = PTHREAD_MUTEX_INITIALIZER;
 
 extern "C" int __isthreaded;
 
@@ -111,7 +111,7 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
 }
 
 static void* __create_thread_stack(pthread_internal_t* thread) {
-  ScopedPthreadMutexLocker lock(&gPthreadStackCreationLock);
+  ScopedPthreadMutexLocker lock(&g_pthread_stack_creation_lock);
 
  // Create a new private anonymous map.
  int prot = PROT_READ | PROT_WRITE;
@@ -258,7 +258,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
 
  // Notify any debuggers about the new thread.
  {
-    ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
+    ScopedPthreadMutexLocker debugger_locker(&g_debugger_notification_lock);
    _thread_created_hook(thread->tid);
  }
 
diff --git a/libc/bionic/pthread_debug.cpp b/libc/bionic/pthread_debug.cpp
index f01f040..be89b46 100644
--- a/libc/bionic/pthread_debug.cpp
+++ b/libc/bionic/pthread_debug.cpp
@@ -127,9 +127,9 @@ extern const char* __progname;
 * level 2 : deadlock prediction enabled w/ call stacks
 */
 #define CAPTURE_CALLSTACK 2
-static int sPthreadDebugLevel = 0;
-static pid_t sPthreadDebugDisabledThread = -1;
-static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
+static int g_pthread_debug_level = 0;
+static pid_t g_pthread_debug_disabled_thread = -1;
+static pthread_mutex_t g_dbg_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /****************************************************************************/
 
@@ -138,23 +138,23 @@ static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
 */
 #define DBG_ALLOC_BLOCK_SIZE PAGESIZE
 
-static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
-static char* sDbgAllocPtr = NULL;
+static size_t g_dbg_alloc_offset = DBG_ALLOC_BLOCK_SIZE;
+static char* g_dbg_alloc_ptr = NULL;
 
 template <typename T>
 static T* DbgAllocLocked(size_t count = 1) {
    size_t size = sizeof(T) * count;
-    if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
-        sDbgAllocOffset = 0;
-        sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
+    if ((g_dbg_alloc_offset + size) > DBG_ALLOC_BLOCK_SIZE) {
+        g_dbg_alloc_offset = 0;
+        g_dbg_alloc_ptr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
                                                    PROT_READ|PROT_WRITE,
                                                    MAP_ANON | MAP_PRIVATE, 0, 0));
-        if (sDbgAllocPtr == MAP_FAILED) {
+        if (g_dbg_alloc_ptr == MAP_FAILED) {
            return NULL;
        }
    }
-    void* addr = sDbgAllocPtr + sDbgAllocOffset;
-    sDbgAllocOffset += size;
+    void* addr = g_dbg_alloc_ptr + g_dbg_alloc_offset;
+    g_dbg_alloc_offset += size;
 
    return reinterpret_cast<T*>(addr);
 }
@@ -365,7 +365,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
        uintptr_t addrs[STACK_TRACE_DEPTH];
 
        /* Turn off prediction temporarily in this thread while logging */
-        sPthreadDebugDisabledThread = gettid();
+        g_pthread_debug_disabled_thread = gettid();
 
        backtrace_startup();
 
@@ -384,7 +384,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
            MutexInfo* parent = cur->parents.list[i];
            if (parent->owner == ourtid) {
                LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
-                if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+                if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
                    log_backtrace(parent->stackTrace, parent->stackDepth);
                }
                cur = parent;
@@ -405,7 +405,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
        MutexInfo* child = pList->list[i];
        if (!traverseTree(child, obj)) {
            LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
-            if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+            if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
                int index = historyListHas(&obj->parents, objParent);
                if ((size_t)index < (size_t)obj->stacks.count) {
                    log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
@@ -435,7 +435,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
    object->owner = tid;
    object->lockCount = 0;
 
-    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+    if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
        // always record the call stack when acquiring a lock.
        // it's not efficient, but is useful during diagnostics
        object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
@@ -451,7 +451,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
    if (historyListHas(&mrl->children, object) >= 0)
        return;
 
-    pthread_mutex_lock_unchecked(&sDbgLock);
+    pthread_mutex_lock_unchecked(&g_dbg_lock);
 
    linkParentToChild(mrl, object);
    if (!traverseTree(object, mrl)) {
@@ -459,20 +459,20 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
        LOGW("%s\n", kEndBanner);
        unlinkParentFromChild(mrl, object);
        // reenable pthread debugging for this thread
-        sPthreadDebugDisabledThread = -1;
+        g_pthread_debug_disabled_thread = -1;
    } else {
        // record the call stack for this link
        // NOTE: the call stack is added at the same index
        // as mrl in object->parents[]
        // ie: object->parents.count == object->stacks.count, which is
        // also the index.
-        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+        if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
            callstackListAdd(&object->stacks,
                object->stackDepth, object->stackTrace);
        }
    }
 
-    pthread_mutex_unlock_unchecked(&sDbgLock);
+    pthread_mutex_unlock_unchecked(&g_dbg_lock);
 }
 
 static void mutex_unlock_checked(MutexInfo* object)
@@ -509,8 +509,8 @@ struct HashTable {
    HashEntry* slots[HASHTABLE_SIZE];
 };
 
-static HashTable sMutexMap;
-static HashTable sThreadMap;
+static HashTable g_mutex_map;
+static HashTable g_thread_map;
 
 /****************************************************************************/
 
@@ -593,9 +593,9 @@ static int MutexInfo_equals(void const* data, void const* key) {
 
 static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
 {
-    pthread_mutex_lock_unchecked(&sDbgLock);
+    pthread_mutex_lock_unchecked(&g_dbg_lock);
 
-    HashEntry* entry = hashmap_lookup(&sMutexMap,
+    HashEntry* entry = hashmap_lookup(&g_mutex_map,
            &mutex, sizeof(mutex), &MutexInfo_equals);
    if (entry->data == NULL) {
@@ -604,7 +604,7 @@ static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
        initMutexInfo(mutex_info, mutex);
    }
 
-    pthread_mutex_unlock_unchecked(&sDbgLock);
+    pthread_mutex_unlock_unchecked(&g_dbg_lock);
 
    return (MutexInfo *)entry->data;
 }
@@ -617,9 +617,9 @@ static int ThreadInfo_equals(void const* data, void const* key) {
 
 static ThreadInfo* get_thread_info(pid_t pid)
 {
-    pthread_mutex_lock_unchecked(&sDbgLock);
+    pthread_mutex_lock_unchecked(&g_dbg_lock);
 
-    HashEntry* entry = hashmap_lookup(&sThreadMap,
+    HashEntry* entry = hashmap_lookup(&g_thread_map,
            &pid, sizeof(pid), &ThreadInfo_equals);
    if (entry->data == NULL) {
@@ -628,7 +628,7 @@ static ThreadInfo* get_thread_info(pid_t pid)
        initThreadInfo(thread_info, pid);
    }
 
-    pthread_mutex_unlock_unchecked(&sDbgLock);
+    pthread_mutex_unlock_unchecked(&g_dbg_lock);
 
    return (ThreadInfo *)entry->data;
 }
@@ -672,9 +672,9 @@ static MutexInfo* get_most_recently_locked() {
 
 extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
 {
-    if (sPthreadDebugLevel == 0) return;
+    if (g_pthread_debug_level == 0) return;
    // prediction disabled for this thread
-    if (sPthreadDebugDisabledThread == gettid())
+    if (g_pthread_debug_disabled_thread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    MutexInfo* mrl = get_most_recently_locked();
@@ -689,9 +689,9 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
 {
-    if (sPthreadDebugLevel == 0) return;
+    if (g_pthread_debug_level == 0) return;
    // prediction disabled for this thread
-    if (sPthreadDebugDisabledThread == gettid())
+    if (g_pthread_debug_disabled_thread == gettid())
        return;
    MutexInfo* object = get_mutex_info(mutex);
    remove_most_recently_locked(object);
@@ -709,8 +709,8 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
    if (level) {
        LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
                level, getpid(), __progname);
-        hashmap_init(&sMutexMap);
-        sPthreadDebugLevel = level;
+        hashmap_init(&g_mutex_map);
+        g_pthread_debug_level = level;
    }
 }
 #endif
diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp
index 2692762..de818cd 100644
--- a/libc/bionic/pthread_exit.cpp
+++ b/libc/bionic/pthread_exit.cpp
@@ -92,7 +92,7 @@ void pthread_exit(void* return_value) {
  size_t stack_size = thread->attr.stack_size;
  bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);
 
-  pthread_mutex_lock(&gThreadListLock);
+  pthread_mutex_lock(&g_thread_list_lock);
  if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
    // The thread is detached, so we can free the pthread_internal_t.
    // First make sure that the kernel does not try to clear the tid field
@@ -110,7 +110,7 @@ void pthread_exit(void* return_value) {
    // pthread_join is responsible for destroying the pthread_internal_t for non-detached threads.
    // The kernel will futex_wake on the pthread_internal_t::tid field to wake pthread_join.
  }
-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);
 
  if (user_allocated_stack) {
    // Cleaning up this thread's stack is the creator's responsibility, not ours.
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index 41f4636..295d9d6 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -86,8 +86,8 @@ __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread)
 */
 #define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGSTKSZ)
 
-__LIBC_HIDDEN__ extern pthread_internal_t* gThreadList;
-__LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock;
+__LIBC_HIDDEN__ extern pthread_internal_t* g_thread_list;
+__LIBC_HIDDEN__ extern pthread_mutex_t g_thread_list_lock;
 
 __LIBC_HIDDEN__ int __timespec_from_absolute(timespec*, const timespec*, clockid_t);
 
diff --git a/libc/bionic/pthread_internals.cpp b/libc/bionic/pthread_internals.cpp
index d4d6099..baa95d9 100644
--- a/libc/bionic/pthread_internals.cpp
+++ b/libc/bionic/pthread_internals.cpp
@@ -33,8 +33,8 @@
 #include "private/bionic_tls.h"
 #include "private/ScopedPthreadMutexLocker.h"
 
-pthread_internal_t* gThreadList = NULL;
-pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
+pthread_internal_t* g_thread_list = NULL;
+pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 void _pthread_internal_remove_locked(pthread_internal_t* thread) {
  if (thread->next != NULL) {
@@ -43,7 +43,7 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
  if (thread->prev != NULL) {
    thread->prev->next = thread->next;
  } else {
-    gThreadList = thread->next;
+    g_thread_list = thread->next;
  }
 
  // The main thread is not heap-allocated. See __libc_init_tls for the declaration,
@@ -54,15 +54,15 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
 }
 
 void _pthread_internal_add(pthread_internal_t* thread) {
-  ScopedPthreadMutexLocker locker(&gThreadListLock);
+  ScopedPthreadMutexLocker locker(&g_thread_list_lock);
 
  // We insert at the head.
-  thread->next = gThreadList;
+  thread->next = g_thread_list;
  thread->prev = NULL;
  if (thread->next != NULL) {
    thread->next->prev = thread;
  }
-  gThreadList = thread;
+  g_thread_list = thread;
 }
 
 pthread_internal_t* __get_thread(void) {
diff --git a/libc/bionic/pthread_key.cpp b/libc/bionic/pthread_key.cpp
index 6cc68af..27eab27 100644
--- a/libc/bionic/pthread_key.cpp
+++ b/libc/bionic/pthread_key.cpp
@@ -210,8 +210,8 @@ int pthread_key_delete(pthread_key_t key) {
  }
 
  // Clear value in all threads.
-  pthread_mutex_lock(&gThreadListLock);
-  for (pthread_internal_t* t = gThreadList; t != NULL; t = t->next) {
+  pthread_mutex_lock(&g_thread_list_lock);
+  for (pthread_internal_t* t = g_thread_list; t != NULL; t = t->next) {
    // Skip zombie threads. They don't have a valid TLS area any more.
    // Similarly, it is possible to have t->tls == NULL for threads that
    // were just recently created through pthread_create() but whose
@@ -226,7 +226,7 @@ int pthread_key_delete(pthread_key_t key) {
  }
 
  tls_map.DeleteKey(key);
-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);
  return 0;
 }
 
diff --git a/libc/bionic/thread_atexit.cpp b/libc/bionic/thread_atexit.cpp
index cad65d3..68c119d 100644
--- a/libc/bionic/thread_atexit.cpp
+++ b/libc/bionic/thread_atexit.cpp
@@ -30,7 +30,7 @@
 
 #include <pthread.h>
 
-static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_atexit_lock = PTHREAD_MUTEX_INITIALIZER;
 
 __BEGIN_DECLS
 __LIBC_HIDDEN__ void _thread_atexit_lock();
@@ -38,9 +38,9 @@ __LIBC_HIDDEN__ void _thread_atexit_unlock();
 __END_DECLS
 
 void _thread_atexit_lock() {
-  pthread_mutex_lock(&gAtExitLock);
+  pthread_mutex_lock(&g_atexit_lock);
 }
 
 void _thread_atexit_unlock() {
-  pthread_mutex_unlock(&gAtExitLock);
+  pthread_mutex_unlock(&g_atexit_lock);
 }
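Editor's note, not part of the patch: the comments in pthread_atfork.cpp above describe the POSIX ordering of fork handlers (prepare handlers run in reverse registration order; parent and child handlers run in registration order). The standalone sketch below is illustrative only and makes no claims about bionic internals; it simply demonstrates that ordering from an application's point of view.

// Illustrative sketch only -- shows the pthread_atfork() handler ordering that
// the pthread_atfork.cpp comments describe. Not part of the change above.
#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void prepare1() { fprintf(stderr, "prepare1\n"); }
static void parent1()  { fprintf(stderr, "parent1\n"); }
static void child1()   { fprintf(stderr, "child1\n"); }
static void prepare2() { fprintf(stderr, "prepare2\n"); }
static void parent2()  { fprintf(stderr, "parent2\n"); }
static void child2()   { fprintf(stderr, "child2\n"); }

int main() {
  pthread_atfork(prepare1, parent1, child1);
  pthread_atfork(prepare2, parent2, child2);

  // Before fork(): prepare handlers run in reverse registration order
  // (prepare2, then prepare1). After fork(): parent1/parent2 run in the
  // parent and child1/child2 run in the child, both in registration order.
  pid_t pid = fork();
  if (pid == 0) {
    _exit(0);
  }
  waitpid(pid, NULL, 0);
  return 0;
}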