author    Yabin Cui <yabinc@google.com>    2015-03-05 20:35:32 -0800
committer Yabin Cui <yabinc@google.com>    2015-04-14 13:32:09 -0700
commit    5ddbb3f936ee44555a46020239e49ab45109a806 (patch)
tree      70395ecf0897580781856c68cbcd57019ff831e3 /libc/bionic
parent    4bd8f9637daaada333ff35945b00cfe6cb822376 (diff)
Prevent using static-allocated pthread keys before creation.
Bug: 19993460
Change-Id: I244dea7f5df3c8384f88aa48d635348fafc9cbaf
Diffstat (limited to 'libc/bionic')
-rw-r--r--   libc/bionic/pthread_key.cpp   24
1 file changed, 18 insertions(+), 6 deletions(-)
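For context, a minimal sketch (not part of the commit) of the failure mode the change guards against: a pthread_key_t with static storage duration is zero-initialized, so before this change it was indistinguishable from the perfectly valid key index 0 and would silently alias whichever key pthread_key_create() handed out first. After the change, a key without bit 31 (KEY_VALID_FLAG) set is rejected. Note that using a key before pthread_key_create() is undefined behavior per POSIX; the output below depends on the libc.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t g_key;  // never passed to pthread_key_create(); zero-initialized

int main() {
  // Old bionic: key 0 is "in range", so this writes the per-thread slot owned by
  // whichever component was actually handed index 0 by pthread_key_create().
  // New bionic: KEY_VALID_FLAG is clear, so pthread_setspecific() returns EINVAL
  // and pthread_getspecific() returns NULL.
  int rc = pthread_setspecific(g_key, "oops");
  printf("setspecific on an uncreated key -> %d\n", rc);
  printf("getspecific on an uncreated key -> %p\n", pthread_getspecific(g_key));
  return 0;
}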
diff --git a/libc/bionic/pthread_key.cpp b/libc/bionic/pthread_key.cpp
index 65e0879..6d77afa 100644
--- a/libc/bionic/pthread_key.cpp
+++ b/libc/bionic/pthread_key.cpp
@@ -57,8 +57,15 @@ static inline bool SeqOfKeyInUse(uintptr_t seq) {
return seq & (1 << SEQ_KEY_IN_USE_BIT);
}
+#define KEY_VALID_FLAG (1 << 31)
+
+static_assert(sizeof(pthread_key_t) == sizeof(int) && static_cast<pthread_key_t>(-1) < 0,
+ "pthread_key_t should be typedef to int");
+
static inline bool KeyInValidRange(pthread_key_t key) {
- return key >= 0 && key < BIONIC_PTHREAD_KEY_COUNT;
+ // key < 0 means bit 31 is set.
+ // Then key < (2^31 | BIONIC_PTHREAD_KEY_COUNT) means the index part of key < BIONIC_PTHREAD_KEY_COUNT.
+ return (key < (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT));
}
// Called from pthread_exit() to remove all pthread keys. This must call the destructor of
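The new range check leans on pthread_key_t being a signed 32-bit int (hence the static_assert above): a created key has bit 31 set and is therefore negative, and KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT is negative too, so a single signed comparison checks both "flag present" and "index below the count". A standalone sketch, not bionic code: it writes the flag as INT_MIN rather than (1 << 31), and the count of 141 is purely illustrative since the real BIONIC_PTHREAD_KEY_COUNT is defined elsewhere in bionic.

#include <climits>
#include <cstdio>

constexpr int KEY_VALID_FLAG = INT_MIN;            // bit 31 of a 32-bit int
constexpr int BIONIC_PTHREAD_KEY_COUNT = 141;      // illustrative value only

static bool KeyInValidRange(int key) {
  // Both sides are negative when bit 31 is set, so one comparison rejects keys
  // without the flag as well as keys whose index part is >= the count.
  return key < (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT);
}

int main() {
  std::printf("%d\n", KeyInValidRange(0));                    // 0: zero-initialized static key
  std::printf("%d\n", KeyInValidRange(5));                    // 0: plain index, flag missing
  std::printf("%d\n", KeyInValidRange(KEY_VALID_FLAG | 5));   // 1: created key, index 5
  std::printf("%d\n", KeyInValidRange(KEY_VALID_FLAG | 200)); // 0: flag set, index out of range
  return 0;
}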
@@ -114,7 +121,7 @@ int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
while (!SeqOfKeyInUse(seq)) {
if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
- *key = i;
+ *key = i | KEY_VALID_FLAG;
return 0;
}
}
@@ -127,9 +134,10 @@ int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
// responsibility of the caller to properly dispose of the corresponding data
// and resources, using any means it finds suitable.
int pthread_key_delete(pthread_key_t key) {
- if (!KeyInValidRange(key)) {
+ if (__predict_false(!KeyInValidRange(key))) {
return EINVAL;
}
+ key &= ~KEY_VALID_FLAG;
// Increase seq to invalidate values in all threads.
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
if (SeqOfKeyInUse(seq)) {
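After the range check, each entry point strips the flag before indexing, as in the key &= ~KEY_VALID_FLAG line above. A tiny sketch with the same illustrative constants as before, just to show that the mask recovers the plain array index:

#include <climits>
#include <cstdio>

int main() {
  constexpr int KEY_VALID_FLAG = INT_MIN;     // as in the sketch above
  int tagged = KEY_VALID_FLAG | 7;            // shape of a key returned by pthread_key_create()
  int index  = tagged & ~KEY_VALID_FLAG;      // back to 7, usable for key_map[] / key_data[]
  std::printf("tagged=%d index=%d\n", tagged, index);
  return 0;
}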
@@ -141,9 +149,10 @@ int pthread_key_delete(pthread_key_t key) {
}
void* pthread_getspecific(pthread_key_t key) {
- if (!KeyInValidRange(key)) {
+ if (__predict_false(!KeyInValidRange(key))) {
return NULL;
}
+ key &= ~KEY_VALID_FLAG;
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
pthread_key_data_t* data = &(__get_thread()->key_data[key]);
// It is the user's responsibility to synchronize between the creation and use of pthread keys,
@@ -151,16 +160,19 @@ void* pthread_getspecific(pthread_key_t key) {
if (__predict_true(SeqOfKeyInUse(seq) && data->seq == seq)) {
return data->data;
}
+ // We arrive here when the current thread holds the seq of a deleted pthread key. So the
+ // data is for the deleted pthread key, and should be cleared.
data->data = NULL;
return NULL;
}
int pthread_setspecific(pthread_key_t key, const void* ptr) {
- if (!KeyInValidRange(key)) {
+ if (__predict_false(!KeyInValidRange(key))) {
return EINVAL;
}
+ key &= ~KEY_VALID_FLAG;
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
- if (SeqOfKeyInUse(seq)) {
+ if (__predict_true(SeqOfKeyInUse(seq))) {
pthread_key_data_t* data = &(__get_thread()->key_data[key]);
data->seq = seq;
data->data = const_cast<void*>(ptr);
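To round this out, a portable usage sketch (plain POSIX calls, not bionic internals) of the behavior the seq counter protects: values stored under a deleted key must not leak into a key created later, even if the new key reuses the same internal slot. The mechanism is visible above: pthread_key_delete() bumps key_map[key].seq, so a per-thread entry whose cached seq no longer matches is treated as stale and cleared on the next lookup.

#include <pthread.h>
#include <cstdio>

int main() {
  pthread_key_t key;
  pthread_key_create(&key, nullptr);
  pthread_setspecific(key, "live value");
  std::printf("before delete: %p\n", pthread_getspecific(key));

  pthread_key_delete(key);

  // A newly created key must read as NULL in every thread, whether or not it
  // lands in the slot the deleted key used to occupy.
  pthread_key_t key2;
  pthread_key_create(&key2, nullptr);
  std::printf("fresh key starts empty: %p\n", pthread_getspecific(key2));
  return 0;
}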