improved spin lock

WolverinDEV 2019-07-09 12:29:12 +02:00
parent efac07862c
commit 309ce3be30


@@ -3,34 +3,35 @@
 #include <atomic>
 #include <thread>
 
-class spin_lock {
-        std::atomic_flag locked = ATOMIC_FLAG_INIT;
-    public:
-        inline void lock() {
-            uint8_t round = 0;
-            while (locked.test_and_set(std::memory_order_acquire)) {
-#if false
-                /* waiting 'till its zero so we can try an exchanged again; Atomic exchanges have a huge bug overhead to deal with! */
-#ifdef WIN32
-                while(locked._My_flag > 0) {
+#ifdef WIN32
+    #define always_inline __forceinline
 #else
-                while(locked._M_i) {
-#endif
+    #define always_inline inline __attribute__((__always_inline__))
 #endif
+
+class spin_lock {
+        std::atomic_bool locked{false};
+    public:
+        always_inline void lock() {
+            while (locked.exchange(true, std::memory_order_acquire))
+                this->wait_until_release();
+        }
+
+        always_inline void wait_until_release() const {
+            uint8_t round = 0;
+            while (locked.load(std::memory_order_relaxed)) {
                 //Yield when we're using this lock for a longer time, which we usually aren't doing
                 if(round++ % 8 == 0)
                     std::this_thread::yield();
-#if false
-                }
-#endif
             }
         }
 
-        inline bool try_lock() {
-            return !locked.test_and_set(std::memory_order_acquire);
+        always_inline bool try_lock() {
+            return !locked.exchange(true, std::memory_order_acquire);
         }
 
-        inline void unlock() {
-            locked.clear(std::memory_order_release);
+        always_inline void unlock() {
+            locked.store(false, std::memory_order_release);
         }
 };
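
The public interface is unchanged by this commit (lock/try_lock/unlock, plus the new wait_until_release helper), but lock() now follows a test-and-test-and-set pattern: a waiter attempts one exchange and, while that fails, spins on a relaxed load of the flag, so contended threads read their locally cached copy instead of issuing exchanges in a loop, and they yield roughly every eighth spin round.

Below is a minimal usage sketch, not part of the commit; the names worker, counter and the iteration count are illustrative. It assumes the spin_lock class from the diff above is in scope and shows that it still satisfies BasicLockable, so std::lock_guard works with it:

#include <mutex>    // std::lock_guard
#include <thread>
#include <cstdio>

static spin_lock counter_lock;  // the class from the diff above
static long counter = 0;

static void worker() {
    for (int i = 0; i < 100000; ++i) {
        // lock() spins via wait_until_release() while another thread holds the lock
        std::lock_guard<spin_lock> guard{counter_lock};
        ++counter;
    }
}

int main() {
    std::thread a{worker}, b{worker};
    a.join();
    b.join();
    std::printf("counter = %ld\n", counter);  // expected: 200000
}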