improved spin lock

WolverinDEV 2019-07-09 12:29:12 +02:00
parent efac07862c
commit 309ce3be30


@@ -3,34 +3,35 @@
 #include <atomic>
 #include <thread>
 
-class spin_lock {
-        std::atomic_flag locked = ATOMIC_FLAG_INIT;
-    public:
-        inline void lock() {
-            uint8_t round = 0;
-            while (locked.test_and_set(std::memory_order_acquire)) {
-#if false
-                /* waiting 'till its zero so we can try an exchanged again; Atomic exchanges have a huge bug overhead to deal with! */
-#ifdef WIN32
-                while(locked._My_flag > 0) {
+#ifdef WIN32
+    #define always_inline __forceinline
 #else
-                while(locked._M_i) {
+    #define always_inline inline __attribute__((__always_inline__))
 #endif
-#endif
-                //Yield when we're using this lock for a longer time, which we usually not doing
-                if(round++ % 8 == 0)
-                    std::this_thread::yield();
-#if false
-                }
-#endif
-            }
+
+class spin_lock {
+        std::atomic_bool locked{false};
+    public:
+        always_inline void lock() {
+            while (locked.exchange(true, std::memory_order_acquire))
+                this->wait_until_release();
         }
 
-        inline bool try_lock() {
-            return !locked.test_and_set(std::memory_order_acquire);
+        always_inline void wait_until_release() const {
+            uint8_t round = 0;
+            while (locked.load(std::memory_order_relaxed)) {
+                //Yield when we're using this lock for a longer time, which we usually not doing
+                if(round++ % 8 == 0)
+                    std::this_thread::yield();
+            }
         }
 
-        inline void unlock() {
-            locked.clear(std::memory_order_release);
+        always_inline bool try_lock() {
+            return !locked.exchange(true, std::memory_order_acquire);
+        }
+
+        always_inline void unlock() {
+            locked.store(false, std::memory_order_release);
         }
 };
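
The change replaces the plain test-and-set loop with a test-and-test-and-set pattern: lock() performs the atomic exchange once per attempt, and wait_until_release() spins on a relaxed load until the lock looks free before retrying. Spinning on a load instead of a read-modify-write lets waiters share the cache line instead of invalidating it on every iteration, and the always_inline macro forces the short lock/unlock paths to be inlined on both MSVC and GCC/Clang.

Below is a minimal usage sketch, not part of the commit, showing the lock guarding a shared counter across several threads; the header name "spin_lock.h" is assumed.

// Usage sketch (assumption: the class above is available via "spin_lock.h").
#include <cstdio>
#include <thread>
#include <vector>
#include "spin_lock.h"

int main() {
    spin_lock lock;
    long counter = 0;

    std::vector<std::thread> workers;
    for (int i = 0; i < 4; i++) {
        workers.emplace_back([&] {
            for (int j = 0; j < 100000; j++) {
                lock.lock();     // spins (yielding every 8th round) until acquired
                ++counter;       // critical section
                lock.unlock();
            }
        });
    }
    for (auto &w : workers)
        w.join();

    printf("counter = %ld\n", counter); // expected: 400000
    return 0;
}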