path: root/tests/stdatomic_test.cpp
Diffstat (limited to 'tests/stdatomic_test.cpp')
-rw-r--r--  tests/stdatomic_test.cpp  |  85
1 file changed, 80 insertions, 5 deletions
diff --git a/tests/stdatomic_test.cpp b/tests/stdatomic_test.cpp
index 5e88c8801..222bd9c80 100644
--- a/tests/stdatomic_test.cpp
+++ b/tests/stdatomic_test.cpp
@@ -14,11 +14,10 @@
* limitations under the License.
*/
-#include <gtest/gtest.h>
-
-#if !defined(__GLIBC__) /* TODO: fix our prebuilt toolchains! */
-
#include <stdatomic.h>
+#include <gtest/gtest.h>
+#include <pthread.h>
+#include <stdint.h>
TEST(stdatomic, LOCK_FREE) {
ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
@@ -167,4 +166,80 @@ TEST(stdatomic, atomic_fetch_and) {
ASSERT_EQ(0x002, atomic_load(&i));
}
-#endif
+// And a rudimentary test of acquire-release memory ordering:
+
+constexpr static uint_least32_t BIG = 10000000ul; // Assumed even below.
+
+struct three_atomics {
+ atomic_uint_least32_t x;
+ char a[123]; // Everything in different cache lines,
+              // to increase the chance of the compiler getting the alignment wrong.
+ atomic_uint_least32_t y;
+ char b[4013];
+ atomic_uint_least32_t z;
+};
+
+// Very simple acquire/release memory ordering sanity check.
+static void* writer(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ for (uint_least32_t i = 0; i <= BIG; i+=2) {
+ atomic_store_explicit(&a->x, i, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i, memory_order_release);
+ atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
+ atomic_store_explicit(&a->y, i+1, memory_order_release);
+ }
+ return 0;
+}
+
+static void* reader(void* arg) {
+ three_atomics* a = reinterpret_cast<three_atomics*>(arg);
+ uint_least32_t xval = 0, yval = 0, zval = 0;
+ size_t repeat = 0;
+ size_t repeat_limit = 1000;
+ while (yval != BIG + 1) {
+ yval = atomic_load_explicit(&a->y, memory_order_acquire);
+ zval = atomic_load_explicit(&a->z, memory_order_relaxed);
+ xval = atomic_load_explicit(&a->x, memory_order_relaxed);
+ // If we see a given value of y, the immediately preceding
+ // stores to z and x, or later ones, should also be visible.
+ if (zval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << zval << " < " << yval << ", " << xval << "\n";
+ return 0; // Only report once.
+ }
+ if (xval < yval) {
+ // Can't just ASSERT, since we are in a non-void function.
+ ADD_FAILURE() << "acquire-release ordering violation: "
+ << xval << " < " << yval << ", " << zval << "\n";
+ return 0; // Only report once.
+ }
+ if (repeat < repeat_limit) ++repeat;
+ }
+ // The following assertion is not technically guaranteed to hold.
+ // But if it fails to hold, this test was useless, and we have a
+ // serious scheduling issue that we should probably know about.
+ EXPECT_EQ(repeat, repeat_limit);
+ return 0;
+}
+
+TEST(stdatomic, ordering) {
+ // Run a memory ordering sanity test.
+ void* result;
+ three_atomics a;
+ atomic_init(&a.x, 0ul);
+ atomic_init(&a.y, 0ul);
+ atomic_init(&a.z, 0ul);
+ pthread_t t1, t2;
+ ASSERT_EQ(0, pthread_create(&t1, 0, reader, &a));
+ ASSERT_EQ(0, pthread_create(&t2, 0, writer, &a));
+ ASSERT_EQ(0, pthread_join(t1, &result));
+ EXPECT_EQ(0, result);
+ ASSERT_EQ(0, pthread_join(t2, &result));
+ EXPECT_EQ(0, result);
+ EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
+ EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
+ EXPECT_EQ(atomic_load(&a.z), BIG + 1);
+}
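
For reference, a minimal standalone sketch (not part of the patch) of the release/acquire message-passing idiom the new ordering test exercises. It assumes the same <stdatomic.h>-in-C++ support the patched test relies on; the g_payload/g_ready names and the value 42 are illustrative only.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int g_payload;  // written before the flag is released
static atomic_int g_ready;    // published with memory_order_release

static void* producer(void*) {
  atomic_store_explicit(&g_payload, 42, memory_order_relaxed);
  atomic_store_explicit(&g_ready, 1, memory_order_release);  // publish
  return 0;
}

static void* consumer(void*) {
  // Once the acquire load observes the release store to g_ready,
  // the earlier relaxed store to g_payload must also be visible.
  while (atomic_load_explicit(&g_ready, memory_order_acquire) == 0) {
  }
  printf("g_payload = %d\n",
         atomic_load_explicit(&g_payload, memory_order_relaxed));
  return 0;
}

int main() {
  atomic_init(&g_payload, 0);
  atomic_init(&g_ready, 0);
  pthread_t t1, t2;
  pthread_create(&t1, 0, consumer, 0);
  pthread_create(&t2, 0, producer, 0);
  pthread_join(t1, 0);
  pthread_join(t2, 0);
  return 0;
}

This is the same guarantee the test's reader thread checks: after its acquire load of y, the relaxed stores to x and z that the writer made before the matching release store must be visible, so zval and xval can never lag behind yval.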