diff --git a/libpandabase/CMakeLists.txt b/libpandabase/CMakeLists.txt index d2995a5dcb7acef6bd73ca0794df7022f92a3363..7b9e63410496b4fa39633eda57c81fe477903cf6 100644 --- a/libpandabase/CMakeLists.txt +++ b/libpandabase/CMakeLists.txt @@ -419,6 +419,9 @@ if (DEFINED ENV{GENMC_PATH}) # Takes too much time #8866 # ${PANDA_ROOT}/libpandabase/tests/genmc/condvar_test_2.cpp ${PANDA_ROOT}/libpandabase/tests/genmc/condvar_test_3.cpp + ${PANDA_ROOT}/libpandabase/tests/genmc/rwlock_test_1.cpp + ${PANDA_ROOT}/libpandabase/tests/genmc/rwlock_test_2.cpp + ${PANDA_ROOT}/libpandabase/tests/genmc/rwlock_test_3.cpp ) foreach(genmc_test ${GENMC_TESTS}) diff --git a/libpandabase/tests/genmc/rwlock_test_1.cpp b/libpandabase/tests/genmc/rwlock_test_1.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6addc38d72312d3ebf24481957fb24fa00c1de23 --- /dev/null +++ b/libpandabase/tests/genmc/rwlock_test_1.cpp @@ -0,0 +1,67 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "common.h"
+
+// The test checks basic work of RWLock
+// Reader reads the global variable, then checks its value
+// Writer increments the global variable and checks its value
+
+static struct rw_lock lock;
+
+static void *Reader(void *arg)
+{
+    // Template arguments restored (they were stripped from the patch text).
+    // All readers are created with a nullptr argument, so the index is informational only.
+    int index = static_cast<int>(reinterpret_cast<intptr_t>(arg));
+    (void)index;  // silence unused-variable warning under -Werror builds
+
+    RWLockReadLock(&lock);
+    int tmp = g_shared;
+    ASSERT(tmp == g_shared);
+    RWLockUnlock(&lock);
+
+    ASSERT(tmp == 0 || tmp == 1 || tmp == 2);
+    return nullptr;
+}
+
+static void *Writer(void *arg)
+{
+    RWLockWriteLock(&lock);
+    // increment global var and check its value
+    CheckGlobalVar(g_shared + 1);
+    RWLockUnlock(&lock);
+    return nullptr;
+}
+
+int main()
+{
+    RWLockInit(&lock);
+    g_shared = 0;
+    pthread_t t1;
+    pthread_t t2;
+    pthread_t t3;
+
+    pthread_create(&t1, nullptr, Reader, nullptr);
+    pthread_create(&t2, nullptr, Reader, nullptr);
+    pthread_create(&t3, nullptr, Writer, nullptr);
+
+    pthread_join(t1, nullptr);
+    pthread_join(t2, nullptr);
+    pthread_join(t3, nullptr);
+
+    // Check that the single writer has finished: it increments g_shared exactly once,
+    // so the final value must be 1 (the original "== 2" matched the two-writer tests).
+    ASSERT(g_shared == 1);
+
+    RWLockDestroy(&lock);
+    return 0;
+}
\ No newline at end of file
diff --git a/libpandabase/tests/genmc/rwlock_test_2.cpp b/libpandabase/tests/genmc/rwlock_test_2.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fa5c2365a7e0854870414f7646691eaabc95f57b
--- /dev/null
+++ b/libpandabase/tests/genmc/rwlock_test_2.cpp
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common.h"
+
+// The test checks basic work of RWLock
+// Reader reads the global variable, then checks its value
+// Writer increments the global variable and checks its value
+
+static struct rw_lock lock;
+
+static void *Reader(void *arg)
+{
+    RWLockReadLock(&lock);
+    int tmp = g_shared;
+    ASSERT(tmp == g_shared);
+    RWLockUnlock(&lock);
+
+    // g_shared only ever grows from 0 to 2 (two writers, one increment each)
+    ASSERT(tmp == 0 || tmp == 1 || tmp == 2);
+    return nullptr;
+}
+
+static void *Writer(void *arg)
+{
+    RWLockWriteLock(&lock);
+    // increment global var and check its value
+    CheckGlobalVar(g_shared + 1);
+    RWLockUnlock(&lock);
+    return nullptr;
+}
+
+int main()
+{
+    RWLockInit(&lock);
+    g_shared = 0;
+    pthread_t t1;
+    pthread_t t2;
+    pthread_t t3;
+    pthread_t t4;
+
+    pthread_create(&t1, nullptr, Reader, nullptr);
+    pthread_create(&t2, nullptr, Reader, nullptr);
+    pthread_create(&t3, nullptr, Writer, nullptr);
+    pthread_create(&t4, nullptr, Writer, nullptr);
+
+    pthread_join(t1, nullptr);
+    pthread_join(t2, nullptr);
+    pthread_join(t3, nullptr);
+    pthread_join(t4, nullptr);
+
+    // Check that both writers have finished (each increments g_shared once)
+    ASSERT(g_shared == 2);
+
+    RWLockDestroy(&lock);
+    return 0;
+}
\ No newline at end of file
diff --git a/libpandabase/tests/genmc/rwlock_test_3.cpp b/libpandabase/tests/genmc/rwlock_test_3.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6f05ebc77198dab10249d700dd131b368d1b3c71
--- /dev/null
+++ b/libpandabase/tests/genmc/rwlock_test_3.cpp
@@ -0,0 +1,65 @@
+/**
+ * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common.h"
+
+// The test checks basic work of RWLock
+// Reader reads the global variable, then checks its value
+// Writer increments the global variable and checks its value
+
+static struct rw_lock lock;
+
+static void *Reader(void *arg)
+{
+    RWLockReadLock(&lock);
+    int tmp = g_shared;
+    ASSERT(tmp == g_shared);
+    RWLockUnlock(&lock);
+
+    // g_shared only ever grows from 0 to 2 (two writers, one increment each)
+    ASSERT(tmp == 0 || tmp == 1 || tmp == 2);
+    return nullptr;
+}
+
+static void *Writer(void *arg)
+{
+    RWLockWriteLock(&lock);
+    // increment global var and check its value
+    CheckGlobalVar(g_shared + 1);
+    RWLockUnlock(&lock);
+    return nullptr;
+}
+
+int main()
+{
+    RWLockInit(&lock);
+    g_shared = 0;
+    pthread_t t1;
+    pthread_t t3;
+    pthread_t t4;
+
+    pthread_create(&t1, nullptr, Reader, nullptr);
+    pthread_create(&t3, nullptr, Writer, nullptr);
+    pthread_create(&t4, nullptr, Writer, nullptr);
+
+    pthread_join(t1, nullptr);
+    pthread_join(t3, nullptr);
+    pthread_join(t4, nullptr);
+
+    // Check that both writers have finished (each increments g_shared once)
+    ASSERT(g_shared == 2);
+
+    RWLockDestroy(&lock);
+    return 0;
+}
\ No newline at end of file
diff --git a/platforms/unix/libpandabase/futex/fmutex.cpp b/platforms/unix/libpandabase/futex/fmutex.cpp
index a5dbac319f356a09ce4e9d76a7b09fd8cc21d2f8..05fa7f542c875c2252b944b6327f56ff92ecfe12 100644
--- a/platforms/unix/libpandabase/futex/fmutex.cpp
+++ b/platforms/unix/libpandabase/futex/fmutex.cpp
@@ -38,6 +38,7 @@ static ATOMIC(bool) DEADLOCK_FLAG = false;
 #ifdef MC_ON
 // GenMC does not support syscalls(futex)
 static ATOMIC_INT FUTEX_SIGNAL;
+static ATOMIC_INT SIGNAL;
 static inline
void FutexWait(ATOMIC_INT *m, int v) { @@ -57,6 +58,25 @@ static inline void FutexWake(void) // Atomic with release order reason: mutex synchronization ATOMIC_FETCH_ADD(&FUTEX_SIGNAL, 1, memory_order_release); } + +static inline void SignalWait(ATOMIC_INT *m, int v) +{ + // Atomic with acquire order reason: mutex synchronization + int s = ATOMIC_LOAD(&SIGNAL, memory_order_acquire); + // Atomic with acquire order reason: mutex synchronization + if (ATOMIC_LOAD(m, memory_order_acquire) != v) { + return; + } + // Atomic with acquire order reason: mutex synchronization + while (ATOMIC_LOAD(&SIGNAL, memory_order_acquire) == s) { + } +} + +static inline void SignalWake(void) +{ + // Atomic with release order reason: mutex synchronization + ATOMIC_FETCH_ADD(&SIGNAL, 1, memory_order_release); +} #else // futex() is defined in header, as it is still included in different places #endif @@ -139,6 +159,30 @@ static inline bool WaitBrieflyFor(ATOMIC_INT *addr) return false; } +// GenMC does not support lambdas, have to copy the wait function +static inline bool WaitBrieflyFor(ATOMIC_INT *addr, bool readlock) +{ +#ifndef MC_ON + // We probably don't want to do syscall (switch context) when we use WaitBrieflyFor + static constexpr uint32_t MAX_BACK_OFF = 10; + static constexpr uint32_t MAX_ITER = 50; + for (uint32_t i = 1; i <= MAX_ITER; i++) { + BackOff(MIN(i, MAX_BACK_OFF)); +#endif + // Atomic with relaxed order reason: mutex synchronization + int state = ATOMIC_LOAD(addr, memory_order_relaxed); + if (readlock && state != WRITE_LOCKED) { + return true; + } + if (state == UNLOCKED) { + return true; + } +#ifndef MC_ON + } +#endif + return false; +} + void MutexInit(struct fmutex *const m) { // Atomic with relaxed order reason: mutex synchronization @@ -404,7 +448,7 @@ void Wait(struct CondVar *const cond, struct fmutex *const m) MutexUnlock(m); #ifdef MC_ON - FutexWait(&cond->cond, cur_cond); + SignalWait(&cond->cond, cur_cond); #else // NOLINTNEXTLINE(hicpp-signed-bitwise), 
NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
     if (futex(GetCondAddr(cond), FUTEX_WAIT_PRIVATE, cur_cond, nullptr, nullptr, 0) != 0) {
@@ -451,7 +495,7 @@
     MutexUnlock(m);
 
 #ifdef MC_ON
-    FutexWait(&cond->cond, cur_cond);
+    SignalWait(&cond->cond, cur_cond);
 #else
     int wait_flag;
     int match_flag;
@@ -503,7 +547,7 @@ void SignalCount(struct CondVar *const cond, int32_t to_wake)
     ATOMIC_FETCH_ADD(&cond->cond, 1, memory_order_relaxed);
 
 #ifdef MC_ON
-    FutexWake();
+    SignalWake();
 #else
     if (IsHeld(mutex, current_tid)) {
         // This thread is owner of current mutex, do requeue to mutex waitqueue
@@ -521,6 +565,292 @@ void SignalCount(struct CondVar *const cond, int32_t to_wake)
 #endif
 }
 
+// Raw address of the rwlock state word; needed for the futex() syscall interface.
+int *GetStateAddr(struct RwLock *const rwlock)
+{
+    return reinterpret_cast<int *>(&rwlock->state);
+}
+
+bool HasExclusiveHolder(struct RwLock *const rwlock)
+{
+    // Atomic with relaxed order reason: mutex synchronization
+    return ATOMIC_LOAD(&rwlock->exclusive_owner, memory_order_relaxed) != 0;
+}
+
+bool IsExclusiveHeld(struct RwLock *const rwlock, THREAD_ID thread)
+{
+    // Atomic with relaxed order reason: mutex synchronization
+    return ATOMIC_LOAD(&rwlock->exclusive_owner, memory_order_relaxed) == thread;
+}
+
+void IncrementWaiters(struct RwLock *const rwlock)
+{
+    // Atomic with relaxed order reason: mutex synchronization
+    ATOMIC_FETCH_ADD(&rwlock->waiters, 1, memory_order_relaxed);
+}
+void DecrementWaiters(struct RwLock *const rwlock)
+{
+    // Atomic with relaxed order reason: mutex synchronization
+    ATOMIC_FETCH_SUB(&rwlock->waiters, 1, memory_order_relaxed);
+}
+
+void RWLockInit(struct RwLock *const rwlock)
+{
+    ATOMIC_STORE(&rwlock->state, 0, memory_order_relaxed);
+    ATOMIC_STORE(&rwlock->waiters, 0, memory_order_relaxed);
+    ATOMIC_STORE(&rwlock->exclusive_owner, 0, memory_order_relaxed);
+}
+
+void RWLockDestroy(struct RwLock *const rwlock)
+{
+#ifndef PANDA_TARGET_MOBILE
+    if
(!MutexDoNotCheckOnTerminationLoop()) { +#endif // PANDA_TARGET_MOBILE + // Atomic with relaxed order reason: mutex synchronization + if (ATOMIC_LOAD(&rwlock->state, memory_order_relaxed) != 0) { + FAIL_WITH_MESSAGE("RWLock destruction failed; state is non zero!"); + // Atomic with relaxed order reason: mutex synchronization + } else if (ATOMIC_LOAD(&rwlock->exclusive_owner, memory_order_relaxed) != 0) { + FAIL_WITH_MESSAGE("RWLock destruction failed; RWLock has an owner!"); + // Atomic with relaxed order reason: mutex synchronization + } else if (ATOMIC_LOAD(&rwlock->waiters, memory_order_relaxed) != 0) { + FAIL_WITH_MESSAGE("RWLock destruction failed; RWLock has waiters!"); + } +#ifndef PANDA_TARGET_MOBILE + } else { + LOG_MESSAGE(WARNING, "Termination loop detected, ignoring RWLock"); + } +#endif // PANDA_TARGET_MOBILE +} + +void HandleReadLockWait(struct RwLock *const rwlock, int32_t cur_state) +{ + // Wait until RWLock WriteLock is unlocked + if (!WaitBrieflyFor(&rwlock->state, true)) { + // WaitBrieflyFor failed, go to futex wait + IncrementWaiters(rwlock); + // Retry wait until WriteLock not held. 
+ while (cur_state == WRITE_LOCKED) { + // NOLINTNEXTLINE(hicpp-signed-bitwise) +#ifdef MC_ON + FutexWait(&rwlock->state, cur_state); +#else + if (futex(GetStateAddr(rwlock), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { + if ((errno != EAGAIN) && (errno != EINTR)) { + LOG(FATAL, COMMON) << "Futex wait failed!"; + } + } +#endif + // Atomic with relaxed order reason: mutex synchronization + cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + } + DecrementWaiters(rwlock); + } +} + +void RWLockReadLock(struct RwLock *const rwlock) +{ + bool done = false; + do { + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + if (LIKELY(cur_state != WRITE_LOCKED)) { + auto new_state = cur_state + READ_INCREMENT; + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, new_state, memory_order_acquire, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + } else { + HandleReadLockWait(rwlock, cur_state); + } + } while (!done); + ASSERT(!HasExclusiveHolder(rwlock)); +} + +void RWLockWriteLock(struct RwLock *const rwlock) +{ + if (current_tid == 0) { + current_tid = GET_CURRENT_THREAD; + } + bool done = false; + while (!done) { + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + if (LIKELY(cur_state == UNLOCKED)) { + // Unlocked, can acquire writelock + // Do CAS in case other thread beats us and acquires readlock first + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, WRITE_LOCKED, memory_order_acquire, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + } else { + // Wait until RWLock is unlocked + if (!WaitBrieflyFor(&rwlock->state, false)) { + // WaitBrieflyFor failed, go to futex wait + // Increment waiters count. + IncrementWaiters(rwlock); + // Retry wait until lock not held. 
If we have more than one reader, cur_state check fail + // doesn't mean this lock is unlocked. + while (cur_state != UNLOCKED) { +#ifdef MC_ON + FutexWait(&rwlock->state, cur_state); +#else + // NOLINTNEXTLINE(hicpp-signed-bitwise) + if (futex(GetStateAddr(rwlock), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { + if ((errno != EAGAIN) && (errno != EINTR)) { + FAIL_WITH_MESSAGE("Futex wait failed!"); + } + } +#endif + // Atomic with relaxed order reason: mutex synchronization + cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + } + DecrementWaiters(rwlock); + } + } + } + // RWLock is held now + // Atomic with relaxed order reason: mutex synchronization + ASSERT(ATOMIC_LOAD(&rwlock->state, memory_order_relaxed) == WRITE_LOCKED); + // Atomic with relaxed order reason: mutex synchronization + ASSERT(ATOMIC_LOAD(&rwlock->exclusive_owner, memory_order_relaxed) == 0); + // Atomic with relaxed order reason: mutex synchronization + ATOMIC_STORE(&rwlock->exclusive_owner, current_tid, memory_order_relaxed); +} + +bool RWLockTryReadLock(struct RwLock *const rwlock) +{ + bool done = false; + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + while (!done) { + if (cur_state >= UNLOCKED) { + auto new_state = cur_state + READ_INCREMENT; + // cur_state should be updated with fetched value on fail + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, new_state, memory_order_acquire, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + } else { + // RWLock is Write held, trylock failed. 
+ return false; + } + } + ASSERT(!HasExclusiveHolder(rwlock)); + return true; +} + +bool RWLockTryWriteLock(struct RwLock *const rwlock) +{ + if (current_tid == 0) { + current_tid = GET_CURRENT_THREAD; + } + bool done = false; + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + while (!done) { + if (LIKELY(cur_state == UNLOCKED)) { + // Unlocked, can acquire writelock + // Do CAS in case other thread beats us and acquires readlock first + // cur_state should be updated with fetched value on fail + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, WRITE_LOCKED, memory_order_acquire, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + } else { + // RWLock is held, trylock failed. + return false; + } + } + // RWLock is held now + // Atomic with relaxed order reason: mutex synchronization + ASSERT(ATOMIC_LOAD(&rwlock->state, memory_order_relaxed) == WRITE_LOCKED); + // Atomic with relaxed order reason: mutex synchronization + ASSERT(ATOMIC_LOAD(&rwlock->exclusive_owner, memory_order_relaxed) == 0); + // Atomic with relaxed order reason: mutex synchronization + ATOMIC_STORE(&rwlock->exclusive_owner, current_tid, memory_order_relaxed); + return true; +} + +void RWLockReadUnlock(struct RwLock *const rwlock) +{ + ASSERT(!HasExclusiveHolder(rwlock)); + bool done = false; + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + do { + if (LIKELY(cur_state > 0)) { + // Reduce state by 1 and do release store. + // waiters load should not be reordered before state, so it's done with seq cst. 
+ auto new_state = cur_state - READ_INCREMENT; + // cur_state should be updated with fetched value on fail + // Atomic with seq_cst order reason: mutex synchronization + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, new_state, memory_order_release, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + if (done && new_state == UNLOCKED) { + // Atomic with seq_cst order reason: mutex synchronization + if (ATOMIC_LOAD(&rwlock->waiters, memory_order_relaxed) > 0) { + // Wake one exclusive waiter as there are now no readers. +#ifdef MC_ON + FutexWake(); +#else + // NOLINTNEXTLINE(hicpp-signed-bitwise) + futex(GetStateAddr(rwlock), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0); +#endif + } + } + } else { + FAIL_WITH_MESSAGE("RWLock ReadUnlock got unexpected state, RWLock is unlocked?"); + } + } while (!done); +} + +void RWLockWriteUnlock(struct RwLock *const rwlock) +{ + ASSERT(current_tid != 0); + ASSERT(IsExclusiveHeld(rwlock, current_tid)); + + bool done = false; + // Atomic with relaxed order reason: mutex synchronization + auto cur_state = ATOMIC_LOAD(&rwlock->state, memory_order_relaxed); + // CAS is weak and might fail, do in loop + while (!done) { + if (LIKELY(cur_state == WRITE_LOCKED)) { + // Reset exclusive owner before changing state to avoid check failures if other thread sees UNLOCKED + // Atomic with relaxed order reason: mutex synchronization + ATOMIC_STORE(&rwlock->exclusive_owner, 0, memory_order_relaxed); + // Change state to unlocked and do release store. + // waiters load should not be reordered before state, so it's done with seq cst. 
+ // cur_state should be updated with fetched value on fail + done = ATOMIC_CAS_WEAK(&rwlock->state, cur_state, UNLOCKED, memory_order_release, memory_order_relaxed); +#ifdef MC_ON + __VERIFIER_assume(done); +#endif + } else { + FAIL_WITH_MESSAGE("RWLock WriteUnlock got unexpected state, RWLock is not writelocked?"); + } + } + // We are doing write unlock, all waiters could be ReadLocks so we need to wake all. + // Atomic with seq_cst order reason: mutex synchronization + if (ATOMIC_LOAD(&rwlock->waiters, memory_order_relaxed) > 0) { +#ifdef MC_ON + FutexWake(); +#else + // NOLINTNEXTLINE(hicpp-signed-bitwise) + futex(GetStateAddr(rwlock), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0); +#endif + } +} + +void RWLockUnlock(struct RwLock *const rwlock) +{ + if (HasExclusiveHolder(rwlock)) { + RWLockWriteUnlock(rwlock); + } else { + RWLockReadUnlock(rwlock); + } +} #ifndef MC_ON } // namespace panda::os::unix::memory::futex #endif diff --git a/platforms/unix/libpandabase/futex/fmutex.h b/platforms/unix/libpandabase/futex/fmutex.h index 36d08c098451e2e90518dd2c9ab57997839ebafd..0651230f3379207ce9db0a466eea826ddaa9d6cd 100644 --- a/platforms/unix/libpandabase/futex/fmutex.h +++ b/platforms/unix/libpandabase/futex/fmutex.h @@ -34,10 +34,10 @@ #define ASSERT(a) assert(a) #define LIKELY(a) a #define UNLIKELY(a) a -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) -#define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) +#define ALWAYS_INLINE #else #include +#include #include #include #include @@ -126,6 +126,34 @@ __attribute__((visibility("default"))) void Wait(struct CondVar *cond, struct fm __attribute__((visibility("default"))) bool TimedWait(struct CondVar *cond, struct fmutex *m, uint64_t ms, uint64_t ns, bool is_absolute); +static constexpr int32_t WRITE_LOCKED = -1; +static constexpr int32_t UNLOCKED = 0; +static constexpr int32_t READ_INCREMENT = 1; +// Extra padding to make RWLock 16 bytes long +static constexpr size_t PADDING_SIZE = 1; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct RwLock { + // -1 - write locked; 0 - unlocked; > 0 - read locked by state_ owners. + ATOMIC(int32_t) state; + // Number of waiters both for read and write locks. + ATOMIC(int32_t) waiters; + // Exclusive owner. + alignas(alignof(uint32_t)) ATOMIC(THREAD_ID) exclusive_owner; +#ifndef MC_ON + std::array padding = {0}; +#endif +}; + +__attribute__((visibility("default"))) void RWLockInit(struct RwLock *rwlock); +__attribute__((visibility("default"))) void RWLockDestroy(struct RwLock *rwlock); +__attribute__((visibility("default"))) ALWAYS_INLINE void ReadLock(struct RwLock *rwlock); +__attribute__((visibility("default"))) void RWLockUnlock(struct RwLock *rwlock); +__attribute__((visibility("default"))) void RWLockReadLock(struct RwLock *rwlock); +__attribute__((visibility("default"))) void RWLockWriteLock(struct RwLock *rwlock); +__attribute__((visibility("default"))) bool RWLockTryReadLock(struct RwLock *rwlock); +__attribute__((visibility("default"))) bool RWLockTryWriteLock(struct RwLock *rwlock); + #ifndef MC_ON } // namespace panda::os::unix::memory::futex #endif diff --git a/platforms/unix/libpandabase/futex/mutex.cpp b/platforms/unix/libpandabase/futex/mutex.cpp index 670d1d4caacdc836998c092a6841500ef8690c5b..464d61acf6cef23a88e975218b8548298cb7a27c 100644 --- a/platforms/unix/libpandabase/futex/mutex.cpp +++ b/platforms/unix/libpandabase/futex/mutex.cpp 
@@ -33,39 +33,6 @@ void PostFork() current_tid = os::thread::GetCurrentThreadId(); } -// Spin for small arguments and yield for longer ones. -static void BackOff(uint32_t i) -{ - static constexpr uint32_t SPIN_MAX = 10; - if (i <= SPIN_MAX) { - volatile uint32_t x = 0; // Volatile to make sure loop is not optimized out. - const uint32_t spin_count = 10 * i; - for (uint32_t spin = 0; spin < spin_count; spin++) { - ++x; - } - } else { - thread::Yield(); - } -} - -// Wait until pred is true, or until timeout is reached. -// Return true if the predicate test succeeded, false if we timed out. -template -static inline bool WaitBrieflyFor(std::atomic_int *addr, Pred pred) -{ - // We probably don't want to do syscall (switch context) when we use WaitBrieflyFor - static constexpr uint32_t MAX_BACK_OFF = 10; - static constexpr uint32_t MAX_ITER = 50; - for (uint32_t i = 1; i <= MAX_ITER; i++) { - BackOff(std::min(i, MAX_BACK_OFF)); - // Atomic with relaxed order reason: mutex synchronization - if (pred(addr->load(std::memory_order_relaxed))) { - return true; - } - } - return false; -} - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) Mutex::Mutex() { @@ -106,173 +73,4 @@ void Mutex::UnlockForOther(thread::ThreadId thread) { MutexUnlockForOther(&mutex_, thread); } - -RWLock::~RWLock() -{ -#ifndef PANDA_TARGET_MOBILE - if (!Mutex::DoNotCheckOnTerminationLoop()) { -#endif // PANDA_TARGET_MOBILE - // Atomic with relaxed order reason: mutex synchronization - if (state_.load(std::memory_order_relaxed) != 0) { - LOG(FATAL, COMMON) << "RWLock destruction failed; state_ is non zero!"; - // Atomic with relaxed order reason: mutex synchronization - } else if (exclusive_owner_.load(std::memory_order_relaxed) != 0) { - LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has an owner!"; - // Atomic with relaxed order reason: mutex synchronization - } else if (waiters_.load(std::memory_order_relaxed) != 0) { - LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has 
waiters!"; - } -#ifndef PANDA_TARGET_MOBILE - } else { - LOG(WARNING, COMMON) << "Termination loop detected, ignoring RWLock"; - } -#endif // PANDA_TARGET_MOBILE -} - -void RWLock::WriteLock() -{ - if (current_tid == 0) { - current_tid = os::thread::GetCurrentThreadId(); - } - bool done = false; - while (!done) { - // Atomic with relaxed order reason: mutex synchronization - auto cur_state = state_.load(std::memory_order_relaxed); - if (LIKELY(cur_state == UNLOCKED)) { - // Unlocked, can acquire writelock - // Do CAS in case other thread beats us and acquires readlock first - done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire); - } else { - // Wait until RWLock is unlocked - if (!WaitBrieflyFor(&state_, [](int32_t state) { return state == UNLOCKED; })) { - // WaitBrieflyFor failed, go to futex wait - // Increment waiters count. - IncrementWaiters(); - // Retry wait until lock not held. If we have more than one reader, cur_state check fail - // doesn't mean this lock is unlocked. 
- while (cur_state != UNLOCKED) { - // NOLINTNEXTLINE(hicpp-signed-bitwise) - if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { - if ((errno != EAGAIN) && (errno != EINTR)) { - LOG(FATAL, COMMON) << "Futex wait failed!"; - } - } - // Atomic with relaxed order reason: mutex synchronization - cur_state = state_.load(std::memory_order_relaxed); - } - DecrementWaiters(); - } - } - } - // RWLock is held now - // Atomic with relaxed order reason: mutex synchronization - ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED); - // Atomic with relaxed order reason: mutex synchronization - ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0); - // Atomic with relaxed order reason: mutex synchronization - exclusive_owner_.store(current_tid, std::memory_order_relaxed); -} - -void RWLock::HandleReadLockWait(int32_t cur_state) -{ - // Wait until RWLock WriteLock is unlocked - if (!WaitBrieflyFor(&state_, [](int32_t state) { return state >= UNLOCKED; })) { - // WaitBrieflyFor failed, go to futex wait - IncrementWaiters(); - // Retry wait until WriteLock not held. 
- while (cur_state == WRITE_LOCKED) { - // NOLINTNEXTLINE(hicpp-signed-bitwise) - if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { - if ((errno != EAGAIN) && (errno != EINTR)) { - LOG(FATAL, COMMON) << "Futex wait failed!"; - } - } - // Atomic with relaxed order reason: mutex synchronization - cur_state = state_.load(std::memory_order_relaxed); - } - DecrementWaiters(); - } -} - -bool RWLock::TryReadLock() -{ - bool done = false; - // Atomic with relaxed order reason: mutex synchronization - auto cur_state = state_.load(std::memory_order_relaxed); - while (!done) { - if (cur_state >= UNLOCKED) { - auto new_state = cur_state + READ_INCREMENT; - // cur_state should be updated with fetched value on fail - done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire); - } else { - // RWLock is Write held, trylock failed. - return false; - } - } - ASSERT(!HasExclusiveHolder()); - return true; -} - -bool RWLock::TryWriteLock() -{ - if (current_tid == 0) { - current_tid = os::thread::GetCurrentThreadId(); - } - bool done = false; - // Atomic with relaxed order reason: mutex synchronization - auto cur_state = state_.load(std::memory_order_relaxed); - while (!done) { - if (LIKELY(cur_state == UNLOCKED)) { - // Unlocked, can acquire writelock - // Do CAS in case other thread beats us and acquires readlock first - // cur_state should be updated with fetched value on fail - done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire); - } else { - // RWLock is held, trylock failed. 
- return false; - } - } - // RWLock is held now - // Atomic with relaxed order reason: mutex synchronization - ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED); - // Atomic with relaxed order reason: mutex synchronization - ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0); - // Atomic with relaxed order reason: mutex synchronization - exclusive_owner_.store(current_tid, std::memory_order_relaxed); - return true; -} - -void RWLock::WriteUnlock() -{ - if (current_tid == 0) { - current_tid = os::thread::GetCurrentThreadId(); - } - ASSERT(IsExclusiveHeld(current_tid)); - - bool done = false; - // Atomic with relaxed order reason: mutex synchronization - int32_t cur_state = state_.load(std::memory_order_relaxed); - // CAS is weak and might fail, do in loop - while (!done) { - if (LIKELY(cur_state == WRITE_LOCKED)) { - // Reset exclusive owner before changing state to avoid check failures if other thread sees UNLOCKED - // Atomic with relaxed order reason: mutex synchronization - exclusive_owner_.store(0, std::memory_order_relaxed); - // Change state to unlocked and do release store. - // waiters_ load should not be reordered before state_, so it's done with seq cst. - // cur_state should be updated with fetched value on fail - done = state_.compare_exchange_weak(cur_state, UNLOCKED, std::memory_order_seq_cst); - if (LIKELY(done)) { - // We are doing write unlock, all waiters could be ReadLocks so we need to wake all. 
- // Atomic with seq_cst order reason: mutex synchronization - if (waiters_.load(std::memory_order_seq_cst) > 0) { - // NOLINTNEXTLINE(hicpp-signed-bitwise) - futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0); - } - } - } else { - LOG(FATAL, COMMON) << "RWLock WriteUnlock got unexpected state, RWLock is not writelocked?"; - } - } -} } // namespace panda::os::unix::memory::futex diff --git a/platforms/unix/libpandabase/futex/mutex.h b/platforms/unix/libpandabase/futex/mutex.h index f3514891257a448af0b4ac2ea1f13c01c563ff0c..9d7c714bbfc61a3cf5e3d692c66339deb8b387b6 100644 --- a/platforms/unix/libpandabase/futex/mutex.h +++ b/platforms/unix/libpandabase/futex/mutex.h @@ -135,127 +135,46 @@ private: class SHARED_CAPABILITY("mutex") RWLock { public: - RWLock() = default; - - PANDA_PUBLIC_API ~RWLock(); - - // ReadLock and ReadUnlock are used in mutator lock often, prefer inlining over call to libpandabase - ALWAYS_INLINE void ReadLock() ACQUIRE_SHARED() + PANDA_PUBLIC_API RWLock() { - bool done = false; - while (!done) { - // Atomic with relaxed order reason: mutex synchronization - auto cur_state = state_.load(std::memory_order_relaxed); - if (LIKELY(cur_state >= UNLOCKED)) { - auto new_state = cur_state + READ_INCREMENT; - done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire); - } else { - HandleReadLockWait(cur_state); - } - } - ASSERT(!HasExclusiveHolder()); + futex::RWLockInit(&rwlock_); } - ALWAYS_INLINE void Unlock() RELEASE_GENERIC() + PANDA_PUBLIC_API ~RWLock() { - if (HasExclusiveHolder()) { - WriteUnlock(); - } else { - ReadUnlock(); - } + futex::RWLockDestroy(&rwlock_); } - PANDA_PUBLIC_API void WriteLock() ACQUIRE(); - - PANDA_PUBLIC_API bool TryReadLock() TRY_ACQUIRE_SHARED(true); - - PANDA_PUBLIC_API bool TryWriteLock() TRY_ACQUIRE(true); - -private: - ALWAYS_INLINE void ReadUnlock() RELEASE_SHARED() + // ReadLock and ReadUnlock are used in mutator lock often, prefer inlining over call to libpandabase + 
ALWAYS_INLINE void ReadLock() ACQUIRE_SHARED() { - ASSERT(!HasExclusiveHolder()); - bool done = false; - // Atomic with relaxed order reason: mutex synchronization - auto cur_state = state_.load(std::memory_order_relaxed); - while (!done) { - if (LIKELY(cur_state > 0)) { - // Reduce state by 1 and do release store. - // waiters_ load should not be reordered before state_, so it's done with seq cst. - auto new_state = cur_state - READ_INCREMENT; - // cur_state should be updated with fetched value on fail - // Atomic with seq_cst order reason: mutex synchronization - done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_seq_cst); - if (done && new_state == UNLOCKED) { - // Atomic with seq_cst order reason: mutex synchronization - if (waiters_.load(std::memory_order_seq_cst) > 0) { - // Wake one exclusive waiter as there are now no readers. - // NOLINTNEXTLINE(hicpp-signed-bitwise) - futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0); - } - } - } else { - // Cannot use logger in header - std::cout << "RWLock ReadUnlock got unexpected state, RWLock is unlocked?" << std::endl; - std::abort(); - } - } + futex::RWLockReadLock(&rwlock_); } - PANDA_PUBLIC_API void WriteUnlock() RELEASE(); - - // Non-inline path for handling waiting. - PANDA_PUBLIC_API void HandleReadLockWait(int32_t cur_state); - - static constexpr int32_t WRITE_LOCKED = -1; - static constexpr int32_t UNLOCKED = 0; - static constexpr int32_t READ_INCREMENT = 1; - // -1 - write locked; 0 - unlocked; > 0 - read locked by state_ owners. - std::atomic_int32_t state_ {0}; - - int *GetStateAddr() + ALWAYS_INLINE void Unlock() RELEASE_GENERIC() { - return reinterpret_cast(&state_); + futex::RWLockUnlock(&rwlock_); } - // Exclusive owner. 
- alignas(alignof(uint32_t)) std::atomic exclusive_owner_ {0}; - static_assert(std::atomic::is_always_lock_free); - - bool HasExclusiveHolder() - { - // Atomic with relaxed order reason: mutex synchronization - return exclusive_owner_.load(std::memory_order_relaxed) != 0; - } - bool IsExclusiveHeld(thread::ThreadId thread) + PANDA_PUBLIC_API void WriteLock() ACQUIRE() { - // Atomic with relaxed order reason: mutex synchronization - return exclusive_owner_.load(std::memory_order_relaxed) == thread; + return futex::RWLockWriteLock(&rwlock_); } - // Number of waiters both for read and write locks. - std::atomic_uint32_t waiters_ {0}; - - void IncrementWaiters() + PANDA_PUBLIC_API bool TryReadLock() TRY_ACQUIRE_SHARED(true) { - // Atomic with relaxed order reason: mutex synchronization - waiters_.fetch_add(1, std::memory_order_relaxed); - } - void DecrementWaiters() - { - // Atomic with relaxed order reason: mutex synchronization - waiters_.fetch_sub(1, std::memory_order_relaxed); + return futex::RWLockTryReadLock(&rwlock_); } - // Extra padding to make RWLock 16 bytes long - static constexpr size_t PADDING_SIZE = 1; - std::array padding_ = {0}; - // [[maybe_unused]] causes issues, dummy accessor for `padding_` as workaround - uint32_t DummyAccessPadding() + PANDA_PUBLIC_API bool TryWriteLock() TRY_ACQUIRE(true) { - return padding_[0]; + return futex::RWLockTryWriteLock(&rwlock_); } +private: + static_assert(std::atomic::is_always_lock_free); + struct RwLock rwlock_; + NO_COPY_SEMANTIC(RWLock); NO_MOVE_SEMANTIC(RWLock); }; @@ -302,7 +221,6 @@ private: static constexpr size_t ALL_STRUCTURES_SIZE = 16U; static_assert(sizeof(ConditionVariable) == ALL_STRUCTURES_SIZE); -static_assert(sizeof(RWLock) == ALL_STRUCTURES_SIZE); } // namespace panda::os::unix::memory::futex #endif // PANDA_LIBPANDABASE_PBASE_OS_UNIX__FUTEX_MUTEX_H_