improved spin lock

WolverinDEV 2019-07-09 12:29:12 +02:00
parent efac07862c
commit 309ce3be30


@@ -3,34 +3,35 @@
 #include <atomic>
 #include <thread>
 
+#ifdef WIN32
+    #define always_inline __forceinline
+#else
+    #define always_inline inline __attribute__((__always_inline__))
+#endif
+
 class spin_lock {
-    std::atomic_flag locked = ATOMIC_FLAG_INIT;
+    std::atomic_bool locked{false};
 public:
-    inline void lock() {
-        uint8_t round = 0;
-        while (locked.test_and_set(std::memory_order_acquire)) {
-#if false
-            /* waiting 'till it's zero so we can try an exchange again; atomic exchanges have a huge overhead to deal with! */
-#ifdef WIN32
-            while(locked._My_flag > 0) {
-#else
-            while(locked._M_i) {
-#endif
-#endif
-            //Yield when we're using this lock for a longer time, which we usually aren't doing
-            if(round++ % 8 == 0)
-                std::this_thread::yield();
-#if false
-            }
-#endif
-        }
-    }
+    always_inline void lock() {
+        while (locked.exchange(true, std::memory_order_acquire))
+            this->wait_until_release();
+    }
+
+    /* spin on a cheap relaxed load until the lock looks free, then let lock() retry the exchange */
+    always_inline void wait_until_release() const {
+        uint8_t round = 0;
+        while (locked.load(std::memory_order_relaxed)) {
+            //Yield when we're spinning in this loop for a longer time, which we usually aren't
+            if(round++ % 8 == 0)
+                std::this_thread::yield();
+        }
+    }
 
-    inline bool try_lock() {
-        return !locked.test_and_set(std::memory_order_acquire);
-    }
+    always_inline bool try_lock() {
+        return !locked.exchange(true, std::memory_order_acquire);
+    }
 
-    inline void unlock() {
-        locked.clear(std::memory_order_release);
-    }
+    always_inline void unlock() {
+        locked.store(false, std::memory_order_release);
+    }
 };
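
Usage sketch (not part of this commit): since the class keeps the lock()/try_lock()/unlock() shape, it satisfies the standard Lockable requirements and drops straight into std::lock_guard or std::unique_lock. A minimal example under that assumption; the counter/worker names are illustrative only:

#include <mutex>
#include <thread>
#include <vector>

static spin_lock counter_lock;   // guards `counter`
static long counter = 0;

int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; i++) {
        workers.emplace_back([] {
            for (int j = 0; j < 100000; j++) {
                // RAII: calls counter_lock.lock() here, unlock() at scope exit
                std::lock_guard<spin_lock> guard{counter_lock};
                counter++;
            }
        });
    }
    for (auto& worker : workers)
        worker.join();
    // counter == 400000 once all workers have joined
}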