author     Hans Boehm <hboehm@google.com>   2014-08-19 16:14:01 -0700
committer  Hans Boehm <hboehm@google.com>   2014-09-02 10:32:33 -0700
commit     c8cf3513ecb265ba3aadc846aa2113290a504c44 (patch)
tree       087fc68abafe155789abe2f966c27941973be902 /tests
parent     c191a8adee7ff000b6dd71ee336a12118e2a4733 (diff)
Fix, generalize stdatomic.h; improve test.
We seem to use this stdatomic.h sometimes, and slightly different prebuilts at other times, making them all difficult to test, and making it unclear which one we're testing. This generalizes the bionic header so that it can be used directly as the prebuilt header as well. So long as they don't diverge again, that should somewhat improve test coverage.

Use the correct builtin for atomic_is_lock_free.

Fix atomic_flag_init.

Turn on atomic tests even with __GLIBC__, since they now appear to pass.

Include uchar.h in stdatomic.h where needed.

Add a basic memory ordering test.

Fix bit-rotted comments in bionic tests makefile.

Prerequisite for fixing b/16880454 and Bug:16513433

Change-Id: If6a14c1075b379395ba5d93357d56025c0ffab68
(cherry picked from commit 00aaea364501b3b0abe58dae461136159df1e356)
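As a rough illustration of the standard C11 APIs the generalized header has to provide (atomic_is_lock_free, ATOMIC_FLAG_INIT, and the usual fetch operations), here is a minimal sketch; it is not taken from this commit:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void) {
      atomic_int counter = ATOMIC_VAR_INIT(0);  // C11 initializer macro
      atomic_flag flag = ATOMIC_FLAG_INIT;      // must expand to a valid initializer
      printf("counter lock free: %d\n", atomic_is_lock_free(&counter));
      (void) atomic_flag_test_and_set(&flag);   // sets the flag, returns previous value
      atomic_flag_clear(&flag);
      atomic_fetch_add(&counter, 1);
      return 0;
    }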
Diffstat (limited to 'tests')
-rw-r--r--  tests/Android.mk          |  6
-rw-r--r--  tests/stdatomic_test.cpp  | 85
2 files changed, 84 insertions(+), 7 deletions(-)
diff --git a/tests/Android.mk b/tests/Android.mk
index b370b92..8184bf7 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -237,7 +237,8 @@ include $(LOCAL_PATH)/Android.build.mk
# -----------------------------------------------------------------------------
# Tests for the device using bionic's .so. Run with:
-# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests
+# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests32
+# adb shell /data/nativetest/bionic-unit-tests/bionic-unit-tests64
# -----------------------------------------------------------------------------
bionic-unit-tests_whole_static_libraries := \
libBionicTests \
@@ -269,7 +270,8 @@ include $(LOCAL_PATH)/Android.build.mk
# -----------------------------------------------------------------------------
# Tests for the device linked against bionic's static library. Run with:
-# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static
+# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static32
+# adb shell /data/nativetest/bionic-unit-tests-static/bionic-unit-tests-static64
# -----------------------------------------------------------------------------
bionic-unit-tests-static_whole_static_libraries := \
libBionicTests \
diff --git a/tests/stdatomic_test.cpp b/tests/stdatomic_test.cpp
index 5e88c88..222bd9c 100644
--- a/tests/stdatomic_test.cpp
+++ b/tests/stdatomic_test.cpp
@@ -14,11 +14,10 @@
* limitations under the License.
*/
-#include <gtest/gtest.h>
-
-#if !defined(__GLIBC__) /* TODO: fix our prebuilt toolchains! */
-
#include <stdatomic.h>
+#include <gtest/gtest.h>
+#include <pthread.h>
+#include <stdint.h>
TEST(stdatomic, LOCK_FREE) {
ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
@@ -167,4 +166,80 @@ TEST(stdatomic, atomic_fetch_and) {
ASSERT_EQ(0x002, atomic_load(&i));
}
-#endif
+// And a rudimentary test of acquire-release memory ordering:
+
+constexpr static uint_least32_t BIG = 10000000ul; // Assumed even below.
+
+struct three_atomics {
+ atomic_uint_least32_t x;
+ char a[123]; // Everything in different cache lines,
+ // increase chance of compiler getting alignment wrong.
+ atomic_uint_least32_t y;
+ char b[4013];
+ atomic_uint_least32_t z;
+};
+
+// Very simple acquire/release memory ordering sanity check.
+static void* writer(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ for (uint_least32_t i = 0; i <= BIG; i+=2) {
+ atomic_store_explicit(&a->x, i, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i, memory_order_release);
+ atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i+1, memory_order_release);
+ }
+ return 0;
+}
+
+static void* reader(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ uint_least32_t xval = 0, yval = 0, zval = 0;
+ size_t repeat = 0;
+ size_t repeat_limit = 1000;
+ while (yval != BIG + 1) {
+ yval = atomic_load_explicit(&a->y, memory_order_acquire);
+ zval = atomic_load_explicit(&a->z, memory_order_relaxed);
+ xval = atomic_load_explicit(&a->x, memory_order_relaxed);
+ // If we see a given value of y, the immediately preceding
+ // stores to z and x, or later ones, should also be visible.
+ if (zval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << zval << " < " << yval << ", " << xval << "\n";
+ return 0; // Only report once.
+ }
+ if (xval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << xval << " < " << yval << ", " << zval << "\n";
+ return 0; // Only report once.
+ }
+ if (repeat < repeat_limit) ++repeat;
+ }
+ // The following assertion is not technically guaranteed to hold.
+ // But if it fails to hold, this test was useless, and we have a
+ // serious scheduling issue that we should probably know about.
+ EXPECT_EQ(repeat, repeat_limit);
+ return 0;
+}
+
+TEST(stdatomic, ordering) {
+ // Run a memory ordering sanity test.
+ void* result;
+ three_atomics a;
+ atomic_init(&a.x, 0ul);
+ atomic_init(&a.y, 0ul);
+ atomic_init(&a.z, 0ul);
+ pthread_t t1,t2;
+ ASSERT_EQ(0, pthread_create(&t1, 0, reader, &a));
+ ASSERT_EQ(0, pthread_create(&t2, 0, writer, &a));
+ ASSERT_EQ(0, pthread_join(t1, &result));
+ EXPECT_EQ(0, result);
+ ASSERT_EQ(0, pthread_join(t2, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
+ EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
+ EXPECT_EQ(atomic_load(&a.z), BIG + 1);
+}
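For readers unfamiliar with the guarantee the new test exercises, here is a hedged, standalone sketch of the same acquire/release rule (the publish and consume_if_ready names are illustrative, not part of the commit): a release store to a flag publishes every store sequenced before it, so a thread that observes the flag with an acquire load must also observe those earlier stores.

    #include <stdatomic.h>

    static atomic_int data;   // written with relaxed ordering before the flag
    static atomic_int ready;  // release/acquire flag

    static void publish(int value) {
      atomic_store_explicit(&data, value, memory_order_relaxed);
      atomic_store_explicit(&ready, 1, memory_order_release);     // publishes the store to data
    }

    static int consume_if_ready(int* out) {
      if (atomic_load_explicit(&ready, memory_order_acquire)) {   // synchronizes with the release store
        *out = atomic_load_explicit(&data, memory_order_relaxed); // guaranteed to see the published value
        return 1;
      }
      return 0;
    }

The test above applies the same reasoning with y as the flag and x and z as the payload, which is why observing zval < yval or xval < yval is reported as an ordering violation.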