author    Yabin Cui <yabinc@google.com>  2015-03-13 14:22:05 -0700
committer Yabin Cui <yabinc@google.com>  2015-03-13 19:35:05 -0700
commit    2fabea47ac9475bcc52aff0715819d18aa5bdf1d (patch)
tree      c0cad55497897c934661e49d2fc4c330b4d97cba /libc/bionic
parent    81c31bdd43e50538fa45f5e7783782a5ae5666e9 (diff)
Hide content of pthread_rwlock_t in pthread_rwlock_internal_t.
Bug: 19249079
Change-Id: Ifbe634c716b6793bef897ec5134b55eb44c6b8d5
Diffstat (limited to 'libc/bionic')
-rw-r--r--  libc/bionic/pthread_rwlock.cpp | 257
1 file changed, 128 insertions(+), 129 deletions(-)
diff --git a/libc/bionic/pthread_rwlock.cpp b/libc/bionic/pthread_rwlock.cpp
index 83243ab..f089940 100644
--- a/libc/bionic/pthread_rwlock.cpp
+++ b/libc/bionic/pthread_rwlock.cpp
@@ -62,18 +62,6 @@
#define RWLOCKATTR_DEFAULT 0
#define RWLOCKATTR_SHARED_MASK 0x0010
-static inline bool rwlock_is_shared(const pthread_rwlock_t* rwlock) {
- return rwlock->attr == PTHREAD_PROCESS_SHARED;
-}
-
-static bool timespec_from_absolute(timespec* rel_timeout, const timespec* abs_timeout) {
- if (abs_timeout != NULL) {
- if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout, CLOCK_REALTIME)) {
- return false;
- }
- }
- return true;
-}
int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
*attr = PTHREAD_PROCESS_PRIVATE;
@@ -101,37 +89,33 @@ int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared
return 0;
}
-static inline atomic_int* STATE_ATOMIC_POINTER(pthread_rwlock_t* rwlock) {
- static_assert(sizeof(atomic_int) == sizeof(rwlock->state),
- "rwlock->state should actually be atomic_int in implementation.");
-
- // We prefer casting to atomic_int instead of declaring rwlock->state to be atomic_int directly.
- // Because using the second method pollutes pthread.h, and causes an error when compiling libcxx.
- return reinterpret_cast<atomic_int*>(&rwlock->state);
-}
+struct pthread_rwlock_internal_t {
+ atomic_int state; // 0 = unlocked, -1 = writer holds it, +n = held by n readers
+ atomic_int writer_thread_id;
+ atomic_uint pending_readers;
+ atomic_uint pending_writers;
+ int32_t attr;
-static inline atomic_int* WRITER_THREAD_ID_ATOMIC_POINTER(pthread_rwlock_t* rwlock) {
- static_assert(sizeof(atomic_int) == sizeof(rwlock->writer_thread_id),
- "rwlock->writer_thread_id should actually be atomic_int in implementation.");
-
- return reinterpret_cast<atomic_int*>(&rwlock->writer_thread_id);
-}
-
-static inline atomic_uint* PENDING_READERS_ATOMIC_POINTER(pthread_rwlock_t* rwlock) {
- static_assert(sizeof(atomic_uint) == sizeof(rwlock->pending_readers),
- "rwlock->pending_readers should actually be atomic_uint in implementation.");
+ bool process_shared() const {
+ return attr == PTHREAD_PROCESS_SHARED;
+ }
- return reinterpret_cast<atomic_uint*>(&rwlock->pending_readers);
+#if defined(__LP64__)
+ char __reserved[36];
+#else
+ char __reserved[20];
+#endif
+};
+
+static inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
+ static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
+ "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");
+ return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}
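
This is the heart of the change: the public pthread_rwlock_t stays an opaque, fixed-size blob, and the implementation reinterprets it as pthread_rwlock_internal_t, with a static_assert guarding that the two layouts stay the same size. That is what lets the internal layout grow into __reserved later without breaking the ABI. A minimal, self-contained sketch of the pattern (the type and field names below are illustrative, not bionic's):

    #include <cstdint>

    // Public type: only its size and alignment are part of the ABI.
    struct public_rwlock_t {
      int64_t __private[5];
    };

    // Internal type: free to gain fields as long as it still fits.
    struct internal_rwlock_t {
      int32_t state;
      int32_t writer_tid;
      char __reserved[32];  // room for future fields without an ABI break
    };

    static inline internal_rwlock_t* get_internal(public_rwlock_t* iface) {
      static_assert(sizeof(public_rwlock_t) == sizeof(internal_rwlock_t),
                    "the public type must be exactly as large as the internal one");
      return reinterpret_cast<internal_rwlock_t*>(iface);
    }
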
-static inline atomic_uint* PENDING_WRITERS_ATOMIC_POINTER(pthread_rwlock_t* rwlock) {
- static_assert(sizeof(atomic_uint) == sizeof(rwlock->pending_writers),
- "rwlock->pending_writers should actually be atomic_uint in implementation.");
-
- return reinterpret_cast<atomic_uint*>(&rwlock->pending_writers);
-}
+int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
-int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* attr) {
if (__predict_true(attr == NULL)) {
rwlock->attr = 0;
} else {
@@ -145,53 +129,62 @@ int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* at
}
}
- atomic_init(STATE_ATOMIC_POINTER(rwlock), 0);
- atomic_init(WRITER_THREAD_ID_ATOMIC_POINTER(rwlock), 0);
- atomic_init(PENDING_READERS_ATOMIC_POINTER(rwlock), 0);
- atomic_init(PENDING_WRITERS_ATOMIC_POINTER(rwlock), 0);
+ atomic_init(&rwlock->state, 0);
+ atomic_init(&rwlock->writer_thread_id, 0);
+ atomic_init(&rwlock->pending_readers, 0);
+ atomic_init(&rwlock->pending_writers, 0);
return 0;
}
-int pthread_rwlock_destroy(pthread_rwlock_t* rwlock) {
- if (rwlock->state != 0) {
+int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
+
+ if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
return EBUSY;
}
return 0;
}
-static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
- if (__predict_false(__get_thread()->tid ==
- atomic_load_explicit(WRITER_THREAD_ID_ATOMIC_POINTER(rwlock), memory_order_relaxed))) {
+static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
+ const timespec* abs_timeout_or_null) {
+
+ if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
+ memory_order_relaxed))) {
return EDEADLK;
}
- timespec ts;
- timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
-
- atomic_int* state_ptr = STATE_ATOMIC_POINTER(rwlock);
-
while (true) {
- int cur_state = atomic_load_explicit(state_ptr, memory_order_relaxed);
- if (__predict_true(cur_state >= 0)) {
- if (atomic_compare_exchange_weak_explicit(state_ptr, &cur_state, cur_state + 1,
+ int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
+ if (__predict_true(old_state >= 0)) {
+ if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, old_state + 1,
memory_order_acquire, memory_order_relaxed)) {
return 0;
}
} else {
- if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
- return ETIMEDOUT;
+ timespec ts;
+ timespec* rel_timeout = NULL;
+
+ if (abs_timeout_or_null != NULL) {
+ rel_timeout = &ts;
+ if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
+ return ETIMEDOUT;
+ }
}
- atomic_uint* pending_readers_ptr = PENDING_READERS_ATOMIC_POINTER(rwlock);
// To avoid losing wake-ups, the pending_readers increment should be observed before
// futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
// here, because only a seq_cst fence can ensure sequential consistency for the
// non-atomic operations in futex_wait.
- atomic_fetch_add_explicit(pending_readers_ptr, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);
+
atomic_thread_fence(memory_order_seq_cst);
- int ret = __futex_wait_ex(state_ptr, rwlock_is_shared(rwlock), cur_state, rel_timeout);
- atomic_fetch_sub_explicit(pending_readers_ptr, 1, memory_order_relaxed);
+
+ int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
+ rel_timeout);
+
+ atomic_fetch_sub_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);
+
if (ret == -ETIMEDOUT) {
return ETIMEDOUT;
}
@@ -199,44 +192,49 @@ static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec
}
}
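
The read-lock fast path above is a classic CAS loop: readers may enter only while state is non-negative, and acquire ordering on the successful exchange pairs with the release store in unlock. A standalone sketch of the same loop, using std::atomic instead of bionic's C11 atomics and with the futex/timeout slow path reduced to a spin:

    #include <atomic>

    // 0 = unlocked, -1 = writer holds the lock, +n = n readers hold it.
    static std::atomic<int> state{0};

    void read_lock() {
      int old_state = state.load(std::memory_order_relaxed);
      while (true) {
        if (old_state >= 0) {
          // acquire on success pairs with the release store in unlock;
          // the weak CAS may fail spuriously, which is why we loop.
          if (state.compare_exchange_weak(old_state, old_state + 1,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
            return;
          }
          // on failure, old_state was reloaded by the CAS; just retry
        } else {
          // writer active: the real code sleeps on a futex here
          old_state = state.load(std::memory_order_relaxed);
        }
      }
    }
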
-static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
- if (__predict_false(__get_thread()->tid ==
- atomic_load_explicit(WRITER_THREAD_ID_ATOMIC_POINTER(rwlock), memory_order_relaxed))) {
+static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
+ const timespec* abs_timeout_or_null) {
+
+ if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
+ memory_order_relaxed))) {
return EDEADLK;
}
- timespec ts;
- timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
-
- atomic_int* state_ptr = STATE_ATOMIC_POINTER(rwlock);
-
while (true) {
- int cur_state = atomic_load_explicit(state_ptr, memory_order_relaxed);
- if (__predict_true(cur_state == 0)) {
- if (atomic_compare_exchange_weak_explicit(state_ptr, &cur_state, -1,
+ int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
+ if (__predict_true(old_state == 0)) {
+ if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
memory_order_acquire, memory_order_relaxed)) {
// writer_thread_id is protected by the rwlock and can only be modified by the
// write-owner thread. Other threads may read it for EDEADLK error checking, so a
// relaxed atomic operation is safe enough for it.
- atomic_store_explicit(WRITER_THREAD_ID_ATOMIC_POINTER(rwlock), __get_thread()->tid,
- memory_order_relaxed);
+ atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
return 0;
}
} else {
- if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
- return ETIMEDOUT;
+ timespec ts;
+ timespec* rel_timeout = NULL;
+
+ if (abs_timeout_or_null != NULL) {
+ rel_timeout = &ts;
+ if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
+ return ETIMEDOUT;
+ }
}
- atomic_uint* pending_writers_ptr = PENDING_WRITERS_ATOMIC_POINTER(rwlock);
-
// To avoid losing wake-ups, the pending_writers increment should be observed before
// futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
// here, because only a seq_cst fence can ensure sequential consistency for the
// non-atomic operations in futex_wait.
- atomic_fetch_add_explicit(pending_writers_ptr, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);
+
atomic_thread_fence(memory_order_seq_cst);
- int ret = __futex_wait_ex(state_ptr, rwlock_is_shared(rwlock), cur_state, rel_timeout);
- atomic_fetch_sub_explicit(pending_writers_ptr, 1, memory_order_relaxed);
+
+ int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
+ rel_timeout);
+
+ atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);
+
if (ret == -ETIMEDOUT) {
return ETIMEDOUT;
}
@@ -244,86 +242,87 @@ static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec
}
}
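
The seq_cst fence comment deserves unpacking: the waiter must make its pending_writers increment visible before the futex re-checks state, and the unlocker must make its state update visible before it reads the pending counters; otherwise each side can miss the other and a wake-up is lost. A toy model of the waiter side, with the futex syscall replaced by a spin that re-checks the value the same way the kernel does:

    #include <atomic>

    static std::atomic<int> state{0};
    static std::atomic<unsigned> pending_writers{0};

    // Toy stand-in for the futex wrapper: a real futex sleeps only while
    // *addr still equals expected; here we just spin on the same condition.
    static void toy_futex_wait(std::atomic<int>* addr, int expected) {
      while (addr->load(std::memory_order_relaxed) == expected) {
      }
    }

    static void wait_until_unlocked(int observed_state) {
      pending_writers.fetch_add(1, std::memory_order_relaxed);
      // The fence orders the increment before the state re-check inside the
      // (would-be) syscall, so the unlocker cannot read pending_writers == 0
      // after this thread has already decided to sleep.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      toy_futex_wait(&state, observed_state);
      pending_writers.fetch_sub(1, std::memory_order_relaxed);
    }
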
-int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
+int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
+
return __pthread_rwlock_timedrdlock(rwlock, NULL);
}
-int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
+
return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}
-int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock) {
- atomic_int* state_ptr = STATE_ATOMIC_POINTER(rwlock);
- int cur_state = atomic_load_explicit(state_ptr, memory_order_relaxed);
+int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
- while (cur_state >= 0) {
- if (atomic_compare_exchange_weak_explicit(state_ptr, &cur_state, cur_state + 1,
- memory_order_acquire, memory_order_relaxed)) {
- return 0;
- }
+ int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
+
+ while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
+ old_state + 1, memory_order_acquire, memory_order_relaxed)) {
}
- return EBUSY;
+ return (old_state >= 0) ? 0 : EBUSY;
}
-int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
+int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
+
return __pthread_rwlock_timedwrlock(rwlock, NULL);
}
-int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
+
return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}
-int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock) {
- atomic_int* state_ptr = STATE_ATOMIC_POINTER(rwlock);
- int cur_state = atomic_load_explicit(state_ptr, memory_order_relaxed);
+int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
- while (cur_state == 0) {
- if (atomic_compare_exchange_weak_explicit(state_ptr, &cur_state, -1,
+ int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
+
+ while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
memory_order_acquire, memory_order_relaxed)) {
- int tid = __get_thread()->tid;
- atomic_store_explicit(WRITER_THREAD_ID_ATOMIC_POINTER(rwlock), tid, memory_order_relaxed);
- return 0;
- }
+ }
+ if (old_state == 0) {
+ atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
+ return 0;
}
return EBUSY;
}
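
The restructured try-lock loops only while the lock still looks takeable and the weak CAS failed, then decides from the final old_state; this reads more directly than the old early-return shape. The same idiom for the reader side, assuming the std::atomic state counter from the sketches above:

    #include <atomic>
    #include <cerrno>

    static std::atomic<int> state{0};  // same encoding: >= 0 means readable

    int try_read_lock() {
      int old_state = state.load(std::memory_order_relaxed);
      // compare_exchange_weak writes the current value back into old_state
      // on failure, so the loop condition always re-tests a fresh snapshot.
      while (old_state >= 0 &&
             !state.compare_exchange_weak(old_state, old_state + 1,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
      }
      return (old_state >= 0) ? 0 : EBUSY;
    }
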
-int pthread_rwlock_unlock(pthread_rwlock_t* rwlock) {
- int tid = __get_thread()->tid;
- atomic_int* state_ptr = STATE_ATOMIC_POINTER(rwlock);
- atomic_uint* pending_readers_ptr = PENDING_READERS_ATOMIC_POINTER(rwlock);
- atomic_uint* pending_writers_ptr = PENDING_WRITERS_ATOMIC_POINTER(rwlock);
+int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
+ pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
- int cur_state = atomic_load_explicit(state_ptr, memory_order_relaxed);
- if (__predict_false(cur_state == 0)) {
+ int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
+ if (__predict_false(old_state == 0)) {
return EPERM;
- } else if (cur_state == -1) {
- atomic_int* writer_thread_id_ptr = WRITER_THREAD_ID_ATOMIC_POINTER(rwlock);
- if (atomic_load_explicit(writer_thread_id_ptr, memory_order_relaxed) != tid) {
+ } else if (old_state == -1) {
+ if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
return EPERM;
}
// We're no longer the owner.
- atomic_store_explicit(writer_thread_id_ptr, 0, memory_order_relaxed);
+ atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
// Change state from -1 to 0.
- atomic_store_explicit(state_ptr, 0, memory_order_release);
- goto wakeup_waiters;
+ atomic_store_explicit(&rwlock->state, 0, memory_order_release);
- } else { // cur_state > 0
+ } else { // old_state > 0
// Reduce state by 1.
- while (!atomic_compare_exchange_weak_explicit(state_ptr, &cur_state, cur_state - 1,
- memory_order_release, memory_order_relaxed)) {
- if (cur_state <= 0) {
- return EPERM;
- }
+ while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
+ old_state - 1, memory_order_release, memory_order_relaxed)) {
}
- if (cur_state == 1) {
- goto wakeup_waiters;
+
+ if (old_state <= 0) {
+ return EPERM;
+ } else if (old_state > 1) {
+ return 0;
}
+ // old_state == 1 means we are the last reader unlocking, so we have to wake up any waiters.
}
- return 0;
-wakeup_waiters:
+ // If there are waiters, wake them up.
// To avoid losing wake ups, the update of state should be observed before reading
// pending_readers/pending_writers by all threads. Use read locking as an example:
// read locking thread unlocking thread
@@ -335,9 +334,9 @@ wakeup_waiters:
// in a situation where the locking thread reads state as negative and needs to wait,
// while the unlocking thread reads pending_readers as zero and doesn't need to wake up waiters.
atomic_thread_fence(memory_order_seq_cst);
- if (__predict_false(atomic_load_explicit(pending_readers_ptr, memory_order_relaxed) > 0 ||
- atomic_load_explicit(pending_writers_ptr, memory_order_relaxed) > 0)) {
- __futex_wake_ex(state_ptr, rwlock_is_shared(rwlock), INT_MAX);
+ if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
+ atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
+ __futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
}
return 0;
}
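
For completeness, the unlocker's half of the protocol, modeled the same way as the waiter sketch above: release the lock, fence, then read both pending counters before deciding whether to issue the wake. toy_futex_wake is again a stand-in for the real syscall wrapper:

    #include <atomic>
    #include <climits>

    static std::atomic<int> state{-1};  // assume we hold the write lock
    static std::atomic<unsigned> pending_readers{0}, pending_writers{0};

    static void toy_futex_wake(std::atomic<int>* /*addr*/, int /*count*/) {
      // A real implementation wakes up to `count` threads sleeping on addr.
    }

    static void unlock_and_maybe_wake() {
      state.store(0, std::memory_order_release);
      // Pairs with the waiter's fence: the state update must be observable
      // before the pending counters are read. Without it, this thread could
      // see both counters as zero while a waiter still sees state as locked,
      // and the wake-up would be lost.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (pending_readers.load(std::memory_order_relaxed) > 0 ||
          pending_writers.load(std::memory_order_relaxed) > 0) {
        toy_futex_wake(&state, INT_MAX);
      }
    }
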