diff --git a/libpandabase/tests/genmc/mutex_test_2.cpp b/libpandabase/tests/genmc/mutex_test_2.cpp
index f8ae3a5b8227e808b095b502fbe54affdf8f95d0..9bc010bb17ab8edd711f2e673ba07066ceb3c59c 100644
--- a/libpandabase/tests/genmc/mutex_test_2.cpp
+++ b/libpandabase/tests/genmc/mutex_test_2.cpp
@@ -20,7 +20,6 @@
 
 // The tests checks mutex try lock
 
-pthread_t pthread_self(void);
 // Copy of mutex storage, after complete implementation should totally replace mutex::current_tid
 thread_local pthread_t current_tid;
 
diff --git a/libpandabase/tests/genmc/mutex_test_3.cpp b/libpandabase/tests/genmc/mutex_test_3.cpp
index 5369247258e8ad501c4d9237db7dc986d4034c24..58b4b30fa1f840ff16c98f9a24770433e61ec170 100644
--- a/libpandabase/tests/genmc/mutex_test_3.cpp
+++ b/libpandabase/tests/genmc/mutex_test_3.cpp
@@ -20,7 +20,6 @@
 
 // The tests checks a recursive mutex
 
-pthread_t pthread_self(void);
 // Copy of mutex storage, after complete implementation should totally replace mutex::current_tid
 thread_local pthread_t current_tid;
 
diff --git a/libpandabase/tests/genmc/mutex_test_4.cpp b/libpandabase/tests/genmc/mutex_test_4.cpp
index bb06076e61a1ff0f088aa24cabbf10997a06e798..cfddaf534863479563db15c5006702db35f32ff6 100644
--- a/libpandabase/tests/genmc/mutex_test_4.cpp
+++ b/libpandabase/tests/genmc/mutex_test_4.cpp
@@ -20,7 +20,6 @@
 
 // The tests checks work with two mutexes
 
-pthread_t pthread_self(void);
 // Copy of mutex storage, after complete implementation should totally replace mutex::current_tid
 thread_local pthread_t current_tid;
 
diff --git a/libpandabase/tests/genmc/mutex_test_genmc.cpp b/libpandabase/tests/genmc/mutex_test_genmc.cpp
index 9199e645200bd24e1e6fb3ad8d5001caecfccfb8..3020dacf7164b02f7f60b641a4c04f1dc8c6a8e5 100644
--- a/libpandabase/tests/genmc/mutex_test_genmc.cpp
+++ b/libpandabase/tests/genmc/mutex_test_genmc.cpp
@@ -19,7 +19,6 @@
 #define MC_ON
 #include "../../../platforms/unix/libpandabase/futex/fmutex.cpp"
 
-pthread_t pthread_self(void);
 // Copy of mutex storage, after complete implementation should totally replace mutex::current_tid
 thread_local pthread_t current_tid;
 
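The four test changes above all drop the same local `pthread_t pthread_self(void);` forward declaration; the tests already get the real prototype from <pthread.h>, which fmutex.h pulls in under MC_ON. For orientation, a minimal sketch of the harness shape these GenMC tests share; the worker body, fixture names, and the final assertion are illustrative, not the actual test code:

    #define MC_ON
    #include "../../../platforms/unix/libpandabase/futex/fmutex.cpp"

    // Copy of mutex storage, as in the tests above
    thread_local pthread_t current_tid;

    static struct fmutex g_lock;  // hypothetical fixture
    static int g_counter = 0;     // hypothetical shared state

    static void *Worker(void *)
    {
        current_tid = pthread_self();  // the <pthread.h> prototype suffices
        MutexLock(&g_lock, false);
        g_counter++;
        MutexUnlock(&g_lock);
        return nullptr;
    }

    int main()
    {
        MutexInit(&g_lock);
        pthread_t t1;
        pthread_t t2;
        pthread_create(&t1, nullptr, Worker, nullptr);
        pthread_create(&t2, nullptr, Worker, nullptr);
        pthread_join(t1, nullptr);
        pthread_join(t2, nullptr);
        ASSERT(g_counter == 2);  // GenMC checks this across all interleavings
        return 0;
    }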
diff --git a/platforms/unix/libpandabase/futex/fmutex.cpp b/platforms/unix/libpandabase/futex/fmutex.cpp
index d89060405a28474da4cdfbac3acc8acb514d7a8d..fecfa0e69e0d7a098c25c5da9823c247dd0cea1b 100644
--- a/platforms/unix/libpandabase/futex/fmutex.cpp
+++ b/platforms/unix/libpandabase/futex/fmutex.cpp
@@ -16,7 +16,6 @@
 #include "fmutex.h"
 
 #ifdef MC_ON
-#include "time.h"
 #define FAIL_WITH_MESSAGE(m) ASSERT(0)
 #define LOG_MESSAGE(l, m)
 #define HELPERS_TO_UNSIGNED(m) m
@@ -32,29 +31,29 @@ namespace panda::os::unix::memory::futex {
 // This field is set to false in case of deadlock with daemon threads (only daemon threads
 // are not finished and they have state IS_BLOCKED). In this case we should terminate
 // those threads ignoring failures on lock structures destructors.
-static ATOMIC(bool) DEADLOCK_FLAG = false;
+static std::atomic<bool> DEADLOCK_FLAG;
 
 #ifdef MC_ON
 // GenMC does not support syscalls(futex)
-static ATOMIC_INT FUTEX_SIGNAL;
+static std::atomic<int> FUTEX_SIGNAL;
 
-static inline void FutexWait(ATOMIC_INT *m, int v)
+static inline void FutexWait(std::atomic<int> *m, int v)
 {
     // Atomic with acquire order reason: mutex synchronization
-    int s = ATOMIC_LOAD(&FUTEX_SIGNAL, memory_order_acquire);
+    int s = FUTEX_SIGNAL.load(std::memory_order_acquire);
     // Atomic with acquire order reason: mutex synchronization
-    if (ATOMIC_LOAD(m, memory_order_acquire) != v) {
+    if (m->load(std::memory_order_acquire) != v) {
         return;
     }
     // Atomic with acquire order reason: mutex synchronization
-    while (ATOMIC_LOAD(&FUTEX_SIGNAL, memory_order_acquire) == s) {
+    while (FUTEX_SIGNAL.load(std::memory_order_acquire) == s) {
     }
 }
 
 static inline void FutexWake(void)
 {
     // Atomic with release order reason: mutex synchronization
-    ATOMIC_FETCH_ADD(&FUTEX_SIGNAL, 1, memory_order_release);
+    FUTEX_SIGNAL.fetch_add(1, std::memory_order_release);
 }
 #else
 // futex() is defined in header, as it is still included in different places
@@ -78,25 +77,25 @@ int *GetStateAddr(struct fmutex *const m)
 void IncrementWaiters(struct fmutex *const m)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_ADD(&m->state_and_waiters, WAITER_INCREMENT, memory_order_relaxed);
+    m->state_and_waiters.fetch_add(WAITER_INCREMENT, std::memory_order_relaxed);
 }
 
 void DecrementWaiters(struct fmutex *const m)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_SUB(&m->state_and_waiters, WAITER_INCREMENT, memory_order_relaxed);
+    m->state_and_waiters.fetch_sub(WAITER_INCREMENT, std::memory_order_relaxed);
 }
 
 int32_t GetWaiters(struct fmutex *const m)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    return static_cast<int32_t>(static_cast<uint32_t>(ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed)) >>
+    return static_cast<int32_t>(static_cast<uint32_t>(m->state_and_waiters.load(std::memory_order_relaxed)) >>
                                 static_cast<uint32_t>(WAITER_SHIFT));
 }
 
 bool IsHeld(struct fmutex *const m, THREAD_ID thread)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    return ATOMIC_LOAD(&m->exclusive_owner, memory_order_relaxed) == thread;
+    return m->exclusive_owner.load(std::memory_order_relaxed) == thread;
 }
 
 // Spin for small arguments and yield for longer ones.
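Under MC_ON the futex syscall is modeled with a global epoch counter: FutexWait snapshots FUTEX_SIGNAL, re-checks the futex word, and then spins until some FutexWake advances the epoch. The same pattern in isolation, with illustrative names (g_epoch stands in for FUTEX_SIGNAL):

    #include <atomic>

    static std::atomic<int> g_epoch;  // wake-up "epoch"; replaces the kernel wait queue

    static void EpochWait(std::atomic<int> *word, int expected)
    {
        int epoch = g_epoch.load(std::memory_order_acquire);  // snapshot before the check
        if (word->load(std::memory_order_acquire) != expected) {
            return;  // value already changed; a real futex wait would fail with EAGAIN
        }
        while (g_epoch.load(std::memory_order_acquire) == epoch) {
            // spin until a waker bumps the epoch; a model checker can treat
            // this loop as a blocking await instead of a syscall
        }
    }

    static void EpochWakeAll(void)
    {
        g_epoch.fetch_add(1, std::memory_order_release);  // pairs with the acquire loads above
    }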
@@ -118,7 +117,7 @@ static void BackOff(uint32_t i)
 
 // Wait until pred is true, or until timeout is reached.
 // Return true if the predicate test succeeded, false if we timed out.
-static inline bool WaitBrieflyFor(ATOMIC_INT *addr)
+static inline bool WaitBrieflyFor(std::atomic<int> *addr)
 {
     // We probably don't want to do syscall (switch context) when we use WaitBrieflyFor
     static constexpr uint32_t MAX_BACK_OFF = 10;
@@ -126,7 +125,7 @@
     for (uint32_t i = 1; i <= MAX_ITER; i++) {
         BackOff(MIN(i, MAX_BACK_OFF));
         // Atomic with relaxed order reason: mutex synchronization
-        int state = ATOMIC_LOAD(addr, memory_order_relaxed);
+        int state = addr->load(std::memory_order_relaxed);
         if ((HELPERS_TO_UNSIGNED(state) & HELPERS_TO_UNSIGNED(HELD_MASK)) == 0) {
             return true;
         }
@@ -137,11 +136,11 @@
 void MutexInit(struct fmutex *const m)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->exclusive_owner, 0, memory_order_relaxed);
+    m->exclusive_owner.store(0, std::memory_order_relaxed);
     m->recursive_count = 0;
     m->recursive_mutex = false;
     // Atomic with release order reason: mutex synchronization
-    ATOMIC_STORE(&m->state_and_waiters, 0, memory_order_release);
+    m->state_and_waiters.store(0, std::memory_order_release);
 }
 
 void MutexDestroy(struct fmutex *const m)
@@ -151,10 +150,10 @@ void MutexDestroy(struct fmutex *const m)
     if (!MutexDoNotCheckOnTerminationLoop()) {
 #endif  // PANDA_TARGET_MOBILE
         // Atomic with relaxed order reason: mutex synchronization
-        if (ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed) != 0) {
+        if (m->state_and_waiters.load(std::memory_order_relaxed) != 0) {
            FAIL_WITH_MESSAGE("Mutex destruction failed; state_and_waiters is non zero!");
             // Atomic with relaxed order reason: mutex synchronization
-        } else if (ATOMIC_LOAD(&m->exclusive_owner, memory_order_relaxed) != 0) {
+        } else if (m->exclusive_owner.load(std::memory_order_relaxed) != 0) {
             FAIL_WITH_MESSAGE("Mutex destruction failed; mutex has an owner!");
         }
 #ifndef PANDA_TARGET_MOBILE
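WaitBrieflyFor is the optimistic half of the lock path: poll the state word a bounded number of times with growing back-off, and only fall through to the futex path if the held bit never clears. The shape in isolation; the iteration bound, the back-off policy, and the low-bit mask are assumptions here, while the real constants live in fmutex.cpp and fmutex.h:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    // Returns true once the (assumed) low "held" bit clears, false if the
    // caller should stop spinning and block on the futex instead.
    static bool SpinWhileHeld(std::atomic<int> *state)
    {
        constexpr uint32_t MAX_ITER = 50;  // assumed bound
        for (uint32_t i = 1; i <= MAX_ITER; i++) {
            if (i > 10) {
                std::this_thread::yield();  // long waits: stop burning the core
            }
            if ((static_cast<uint32_t>(state->load(std::memory_order_relaxed)) & 1U) == 0) {
                return true;  // unlocked: caller retries its acquire CAS
            }
        }
        return false;
    }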
@@ -180,12 +179,12 @@ bool MutexLock(struct fmutex *const m, bool trylock)
     bool done = false;
     while (!done) {
         // Atomic with relaxed order reason: mutex synchronization
-        auto cur_state = ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed);
+        auto cur_state = m->state_and_waiters.load(std::memory_order_relaxed);
         if (LIKELY((HELPERS_TO_UNSIGNED(cur_state) & HELPERS_TO_UNSIGNED(HELD_MASK)) == 0)) {
             // Lock not held, try acquiring it.
             auto new_state = HELPERS_TO_UNSIGNED(cur_state) | HELPERS_TO_UNSIGNED(HELD_MASK);
-            done = ATOMIC_CAS_WEAK(&m->state_and_waiters, cur_state, new_state, memory_order_acquire,
-                                   memory_order_relaxed);
+            done = m->state_and_waiters.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire,
+                                                              std::memory_order_relaxed);
         } else {
             if (trylock) {
                 return false;
@@ -214,7 +213,7 @@
             }
 #endif
             // Atomic with relaxed order reason: mutex synchronization
-            cur_state = ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed);
+            cur_state = m->state_and_waiters.load(std::memory_order_relaxed);
         }
         DecrementWaiters(m);
     }
@@ -222,12 +221,12 @@
     }
     // Mutex is held now
     // Atomic with relaxed order reason: mutex synchronization
-    ASSERT((HELPERS_TO_UNSIGNED(ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed)) &
+    ASSERT((HELPERS_TO_UNSIGNED(m->state_and_waiters.load(std::memory_order_relaxed)) &
             HELPERS_TO_UNSIGNED(HELD_MASK)) != 0);
     // Atomic with relaxed order reason: mutex synchronization
-    ASSERT(ATOMIC_LOAD(&m->exclusive_owner, memory_order_relaxed) == 0);
+    ASSERT(m->exclusive_owner.load(std::memory_order_relaxed) == 0);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->exclusive_owner, current_tid, memory_order_relaxed);
+    m->exclusive_owner.store(current_tid, std::memory_order_relaxed);
     m->recursive_count++;
     ASSERT(m->recursive_count == 1);  // should be 1 here, there's a separate path for recursive mutex above
     return true;
@@ -267,7 +266,7 @@ void MutexUnlock(struct fmutex *const m)
     ASSERT(m->recursive_count == 0);  // should be 0 here, there's a separate path for recursive mutex above
     bool done = false;
     // Atomic with relaxed order reason: mutex synchronization
-    auto cur_state = ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed);
+    auto cur_state = m->state_and_waiters.load(std::memory_order_relaxed);
     // Retry CAS until succeess
     while (!done) {
         auto new_state = HELPERS_TO_UNSIGNED(cur_state) & ~HELPERS_TO_UNSIGNED(HELD_MASK);  // State without holding bit
@@ -276,9 +275,10 @@
         }
         // Reset exclusive owner before changing state to avoid check failures if other thread sees UNLOCKED
         // Atomic with relaxed order reason: mutex synchronization
-        ATOMIC_STORE(&m->exclusive_owner, 0, memory_order_relaxed);
+        m->exclusive_owner.store(0, std::memory_order_relaxed);
         // cur_state should be updated with fetched value on fail
-        done = ATOMIC_CAS_WEAK(&m->state_and_waiters, cur_state, new_state, memory_order_release, memory_order_relaxed);
+        done = m->state_and_waiters.compare_exchange_weak(cur_state, new_state, std::memory_order_release,
+                                                          std::memory_order_relaxed);
         if (LIKELY(done)) {
             // If we had waiters, we need to do futex call
             if (UNLIKELY(new_state != 0)) {
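Both fast paths above reduce to one compare_exchange_weak loop: acquire ordering when setting the held bit, release when clearing it, and relaxed on failure because a failed CAS only refreshes cur_state for the retry. Distilled into a standalone sketch, where HELD is an assumed stand-in for HELD_MASK:

    #include <atomic>

    constexpr unsigned HELD = 1U;  // assumed lock bit; waiter bits live above it

    static bool TryAcquire(std::atomic<unsigned> *state)
    {
        unsigned cur = state->load(std::memory_order_relaxed);
        while ((cur & HELD) == 0) {
            // success: acquire order lets us see the previous owner's writes;
            // failure: relaxed is enough, cur now holds the fresh value
            if (state->compare_exchange_weak(cur, cur | HELD, std::memory_order_acquire,
                                             std::memory_order_relaxed)) {
                return true;
            }
        }
        return false;  // held by another thread
    }

    static void Release(std::atomic<unsigned> *state)
    {
        unsigned cur = state->load(std::memory_order_relaxed);
        while (!state->compare_exchange_weak(cur, cur & ~HELD, std::memory_order_release,
                                             std::memory_order_relaxed)) {
        }
        if ((cur & ~HELD) != 0) {
            // waiter bits remained set: this is where MutexUnlock issues the wake
        }
    }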
@@ -296,12 +296,12 @@
 void MutexLockForOther(struct fmutex *const m, THREAD_ID thread)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    ASSERT(ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed) == 0);
+    ASSERT(m->state_and_waiters.load(std::memory_order_relaxed) == 0);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->state_and_waiters, HELD_MASK, memory_order_relaxed);
+    m->state_and_waiters.store(HELD_MASK, std::memory_order_relaxed);
     m->recursive_count = 1;
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->exclusive_owner, thread, memory_order_relaxed);
+    m->exclusive_owner.store(thread, std::memory_order_relaxed);
 }
 
 void MutexUnlockForOther(struct fmutex *const m, THREAD_ID thread)
@@ -310,17 +310,17 @@ void MutexUnlockForOther(struct fmutex *const m, THREAD_ID thread)
         FAIL_WITH_MESSAGE("Unlocking for thread which doesn't own this mutex");
     }
     // Atomic with relaxed order reason: mutex synchronization
-    ASSERT(ATOMIC_LOAD(&m->state_and_waiters, memory_order_relaxed) == HELD_MASK);
+    ASSERT(m->state_and_waiters.load(std::memory_order_relaxed) == HELD_MASK);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->state_and_waiters, 0, memory_order_relaxed);
+    m->state_and_waiters.store(0, std::memory_order_relaxed);
     m->recursive_count = 0;
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_STORE(&m->exclusive_owner, 0, memory_order_relaxed);
+    m->exclusive_owner.store(0, std::memory_order_relaxed);
 }
 
 void ConditionVariableInit(struct CondVar *const cond)
 {
-    ATOMIC_STORE(&cond->mutex_ptr, nullptr, memory_order_relaxed);
+    cond->mutex_ptr.store(nullptr, std::memory_order_relaxed);
     cond->cond = 0;
     cond->waiters = 0;
 }
@@ -331,7 +331,7 @@ void ConditionVariableDestroy(struct CondVar *const cond)
     if (!MutexDoNotCheckOnTerminationLoop()) {
 #endif  // PANDA_TARGET_MOBILE
         // Atomic with relaxed order reason: mutex synchronization
-        if (ATOMIC_LOAD(&cond->waiters, memory_order_relaxed) != 0) {
+        if (cond->waiters.load(std::memory_order_relaxed) != 0) {
            FAIL_WITH_MESSAGE("CondVar destruction failed; waiters is non zero!");
         }
 #ifndef PANDA_TARGET_MOBILE
@@ -346,6 +346,7 @@ int *GetCondAddr(struct CondVar *const v)
     return reinterpret_cast<int *>(&v->cond);
 }
 
+#ifndef MC_ON
 const int64_t MILLISECONDS_PER_SEC = 1000;
 const int64_t NANOSECONDS_PER_MILLISEC = 1000000;
 const int64_t NANOSECONDS_PER_SEC = 1000000000;
@@ -363,6 +364,7 @@ struct timespec ConvertTime(uint64_t ms, uint64_t ns)
     }
     return time;
 }
+#endif
 
 void Wait(struct CondVar *const cond, struct fmutex *const m)
 {
@@ -376,7 +378,7 @@
     // It's undefined behavior to call Wait with different mutexes on the same condvar
     struct fmutex *old_mutex = nullptr;
     // Atomic with relaxed order reason: mutex synchronization
-    while (!ATOMIC_CAS_WEAK(&cond->mutex_ptr, old_mutex, m, memory_order_relaxed, memory_order_relaxed)) {
+    while (!cond->mutex_ptr.compare_exchange_weak(old_mutex, m, std::memory_order_relaxed, std::memory_order_relaxed)) {
         // CAS failed, either it was spurious fail and old val is nullptr, or make sure mutex ptr equals to current
         if (old_mutex != m && old_mutex != nullptr) {
             FAIL_WITH_MESSAGE("CondVar Wait failed; mutex_ptr doesn't equal to provided mutex");
@@ -384,12 +386,12 @@
     }
 
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_ADD(&cond->waiters, 1, memory_order_relaxed);
+    cond->waiters.fetch_add(1, std::memory_order_relaxed);
     IncrementWaiters(m);
     auto old_count = m->recursive_count;
     m->recursive_count = 1;
     // Atomic with relaxed order reason: mutex synchronization
-    auto cur_cond = ATOMIC_LOAD(&cond->cond, memory_order_relaxed);
+    auto cur_cond = cond->cond.load(std::memory_order_relaxed);
     MutexUnlock(m);
 
 #ifdef MC_ON
@@ -407,7 +409,7 @@
     m->recursive_count = old_count;
     DecrementWaiters(m);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_SUB(&cond->waiters, 1, memory_order_relaxed);
+    cond->waiters.fetch_sub(1, std::memory_order_relaxed);
 }
 
 bool TimedWait(struct CondVar *const cond, struct fmutex *const m, uint64_t ms, uint64_t ns, bool is_absolute)
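The Wait path above depends on ordering rather than on the counter's value: cond->cond is snapshotted while the mutex is still held, so a signal that lands between MutexUnlock and the futex wait still changes the counter and is not lost. A reduced sketch of that handshake; MiniMutex is a placeholder for fmutex, and the futex blocking is elided:

    #include <atomic>
    #include <cstdint>

    struct MiniCondVar {
        std::atomic<int32_t> cond {0};     // value irrelevant; only changes matter
        std::atomic<int32_t> waiters {0};
    };

    template <typename MiniMutex>
    void CondWait(MiniCondVar *cv, MiniMutex *m)  // caller holds *m
    {
        cv->waiters.fetch_add(1, std::memory_order_relaxed);
        int32_t snapshot = cv->cond.load(std::memory_order_relaxed);  // before unlocking
        m->Unlock();
        while (cv->cond.load(std::memory_order_acquire) == snapshot) {
            // the real code blocks on a futex keyed to this counter
        }
        m->Lock();
        cv->waiters.fetch_sub(1, std::memory_order_relaxed);
    }

    void CondSignal(MiniCondVar *cv)
    {
        if (cv->waiters.load(std::memory_order_relaxed) != 0) {
            cv->cond.fetch_add(1, std::memory_order_release);  // a changed counter is the wake-up
        }
    }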
@@ -422,7 +424,7 @@
     // It's undefined behavior to call Wait with different mutexes on the same condvar
     struct fmutex *old_mutex = nullptr;
     // Atomic with relaxed order reason: mutex synchronization
-    while (!ATOMIC_CAS_WEAK(&cond->mutex_ptr, old_mutex, m, memory_order_relaxed, memory_order_relaxed)) {
+    while (!cond->mutex_ptr.compare_exchange_weak(old_mutex, m, std::memory_order_relaxed, std::memory_order_relaxed)) {
         // CAS failed, either it was spurious fail and old val is nullptr, or make sure mutex ptr equals to current
         if (old_mutex != m && old_mutex != nullptr) {
             FAIL_WITH_MESSAGE("CondVar Wait failed; mutex_ptr doesn't equal to provided mutex");
@@ -431,12 +433,12 @@
 
     bool timeout = false;
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_ADD(&cond->waiters, 1, memory_order_relaxed);
+    cond->waiters.fetch_add(1, std::memory_order_relaxed);
     IncrementWaiters(m);
     auto old_count = m->recursive_count;
     m->recursive_count = 1;
     // Atomic with relaxed order reason: mutex synchronization
-    auto cur_cond = ATOMIC_LOAD(&cond->cond, memory_order_relaxed);
+    auto cur_cond = cond->cond.load(std::memory_order_relaxed);
     MutexUnlock(m);
 
 #ifdef MC_ON
@@ -469,14 +471,14 @@ bool TimedWait(struct CondVar *const cond, struct fmutex *const m, uint64_t ms,
     m->recursive_count = old_count;
     DecrementWaiters(m);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_SUB(&cond->waiters, 1, memory_order_relaxed);
+    cond->waiters.fetch_sub(1, std::memory_order_relaxed);
     return timeout;
 }
 
 void SignalCount(struct CondVar *const cond, int32_t to_wake)
 {
     // Atomic with relaxed order reason: mutex synchronization
-    if (ATOMIC_LOAD(&cond->waiters, memory_order_relaxed) == 0) {
+    if (cond->waiters.load(std::memory_order_relaxed) == 0) {
         // No waiters, do nothing
         return;
     }
@@ -485,11 +487,11 @@
         current_tid = GET_CURRENT_THREAD;
     }
     // Atomic with relaxed order reason: mutex synchronization
-    auto mutex = ATOMIC_LOAD(&cond->mutex_ptr, memory_order_relaxed);
+    auto mutex = cond->mutex_ptr.load(std::memory_order_relaxed);
     // If this condvar has waiters, mutex_ptr should be set
     ASSERT(mutex != nullptr);
     // Atomic with relaxed order reason: mutex synchronization
-    ATOMIC_FETCH_ADD(&cond->cond, 1, memory_order_relaxed);
+    cond->cond.fetch_add(1, std::memory_order_relaxed);
 
 #ifdef MC_ON
     FutexWake();
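The header change below removes the ATOMIC macro family; the packed word it leaves behind is the heart of the design: state_and_waiters keeps the held flag and the waiter count in one std::atomic<int>, which is why MutexUnlock can publish "unlocked, but waiters pending" with a single release CAS. The arithmetic, with assumed constants (the real HELD_MASK, WAITER_SHIFT, and WAITER_INCREMENT are defined in fmutex.h):

    #include <cstdint>

    constexpr uint32_t HELD_MASK = 1U;                         // assumed: bit 0 = held
    constexpr uint32_t WAITER_SHIFT = 1U;                      // assumed: count starts at bit 1
    constexpr uint32_t WAITER_INCREMENT = 1U << WAITER_SHIFT;  // what Increment/DecrementWaiters add

    constexpr uint32_t Pack(uint32_t waiters, bool held)
    {
        return (waiters << WAITER_SHIFT) | (held ? HELD_MASK : 0U);
    }

    constexpr int32_t WaitersOf(uint32_t state)  // same shape as GetWaiters() above
    {
        return static_cast<int32_t>(state >> WAITER_SHIFT);
    }

    static_assert(WaitersOf(Pack(3U, true)) == 3, "count survives packing");
    static_assert(WaitersOf(Pack(0U, true) + WAITER_INCREMENT) == 1, "one more waiter");
    static_assert((Pack(3U, true) & ~HELD_MASK) != 0, "unlock sees pending waiters in the same word");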
diff --git a/platforms/unix/libpandabase/futex/fmutex.h b/platforms/unix/libpandabase/futex/fmutex.h
index 26b89e51e24d37b718c5e8e7b9db4a99f3b903d6..f15bcc725175cfcddbd16bd12d887d80dd635e1d 100644
--- a/platforms/unix/libpandabase/futex/fmutex.h
+++ b/platforms/unix/libpandabase/futex/fmutex.h
@@ -16,21 +16,16 @@
 #ifndef PANDA_LIBPANDABASE_PBASE_OS_UNIX_FUTEX_FMUTEX_H
 #define PANDA_LIBPANDABASE_PBASE_OS_UNIX_FUTEX_FMUTEX_H
 
+#include <atomic>
+#include <cstdint>
+#include <limits>
+
 #ifdef MC_ON
 #include <pthread.h>
 #include <assert.h>
 #include <stdbool.h>
-#include <stdatomic.h>
 #define THREAD_ID pthread_t
 #define GET_CURRENT_THREAD pthread_self()
-#define ATOMIC(type) _Atomic type
-#define ATOMIC_INT atomic_int
-#define ATOMIC_STORE(addr, val, mem) atomic_store_explicit(addr, val, mem)
-#define ATOMIC_LOAD(addr, mem) atomic_load_explicit(addr, mem)
-#define ATOMIC_FETCH_ADD(addr, val, mem) atomic_fetch_add_explicit(addr, val, mem)
-#define ATOMIC_FETCH_SUB(addr, val, mem) atomic_fetch_sub_explicit(addr, val, mem)
-#define ATOMIC_CAS_WEAK(addr, old_val, new_val, mem1, mem2) \
-    atomic_compare_exchange_weak_explicit(addr, &old_val, new_val, mem1, mem2)
 #define ASSERT(a) assert(a)
 #define LIKELY(a) a
 #define UNLIKELY(a) a
@@ -38,24 +33,12 @@
 #define MAX(a, b) (((a) > (b)) ? (a) : (b))
 #else
 #include <iostream>
-#include <atomic>
 #include <linux/futex.h>
-#include <cstdint>
 #include <sys/syscall.h>
 #include <unistd.h>
-#include <limits>
 
 namespace panda::os::unix::memory::futex {
-#define THREAD_ID thread::ThreadId  // NOLINT(cppcoreguidelines-macro-usage)
-#define GET_CURRENT_THREAD os::thread::GetCurrentThreadId()  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC(type) std::atomic<type>  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC_INT ATOMIC(int)  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC_STORE(addr, val, mem) (addr)->store(val, std::mem)  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC_LOAD(addr, mem) (addr)->load(std::mem)  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC_FETCH_ADD(addr, val, mem) (addr)->fetch_add(val, std::mem)  // NOLINT(cppcoreguidelines-macro-usage)
-#define ATOMIC_FETCH_SUB(addr, val, mem) (addr)->fetch_sub(val, std::mem)  // NOLINT(cppcoreguidelines-macro-usage)
-// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
-#define ATOMIC_CAS_WEAK(addr, old_val, new_val, mem1, mem2) \
-    (addr)->compare_exchange_weak(old_val, new_val, std::mem1, std::mem2)
+#define THREAD_ID thread::ThreadId                           // NOLINT(cppcoreguidelines-macro-usage)
+#define GET_CURRENT_THREAD os::thread::GetCurrentThreadId()  // NOLINT(cppcoreguidelines-macro-usage)
 #endif
 
 // Copy of mutex storage, after complete implementation should totally replace mutex::current_tid
@@ -94,8 +77,8 @@ struct fmutex {
     // Other bits: Number of waiters.
     // Unified lock state and waiters count to avoid requirement of double seq_cst memory order on mutex unlock
     // as it's done in RWLock::WriteUnlock
-    ATOMIC_INT state_and_waiters;
-    ATOMIC(THREAD_ID) exclusive_owner;
+    std::atomic<int> state_and_waiters;
+    std::atomic<THREAD_ID> exclusive_owner;
     int recursive_count;
     bool recursive_mutex;
 };
@@ -109,14 +92,10 @@ bool MutexDoNotCheckOnTerminationLoop();
 void MutexIgnoreChecksOnTerminationLoop();
 
 struct CondVar {
-#ifdef MC_ON
-    alignas(alignof(uint64_t)) struct fmutex *ATOMIC(mutex_ptr);
-#else
-    alignas(alignof(uint64_t)) ATOMIC(struct fmutex *) mutex_ptr;
-#endif
+    alignas(alignof(uint64_t)) std::atomic<struct fmutex *> mutex_ptr;
     // The value itself is not important, detected only its change
-    ATOMIC(int32_t) cond;
-    ATOMIC(int32_t) waiters;
+    std::atomic<int32_t> cond;
+    std::atomic<int32_t> waiters;
 };
 
 void ConditionVariableInit(struct CondVar *cond);
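With the macros gone, both the GenMC (MC_ON) and production builds drive the same std::atomic members; only THREAD_ID and GET_CURRENT_THREAD still differ per mode. A caller-side sketch for the non-MC_ON build; the namespace and function signatures come from fmutex.h, while the include path and the wrapper itself are illustrative:

    #include "platforms/unix/libpandabase/futex/fmutex.h"

    using namespace panda::os::unix::memory::futex;

    void WithLock(struct fmutex *m)
    {
        if (!MutexLock(m, true)) {  // trylock: returns false instead of blocking
            MutexLock(m, false);    // blocking path: brief spin, then futex wait
        }
        // ... critical section ...
        MutexUnlock(m);             // one release CAS; wakes a waiter if any are counted
    }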