#pragma once

#include <atomic>
#include <cstdint>
#include <thread>

class spin_lock {
    // ATOMIC_FLAG_INIT guarantees the flag starts clear (unlocked); since
    // C++20 a default-constructed atomic_flag starts clear as well.
    std::atomic_flag locked = ATOMIC_FLAG_INIT;

public:
    inline void lock() {
        uint8_t round = 0;
        while (locked.test_and_set(std::memory_order_acquire)) {
#if false
            /* Wait till the flag reads zero before trying another exchange;
               atomic exchanges have a huge bus overhead to deal with! */
#ifdef WIN32
            // MSVC's internal flag member; spin on a plain read
            while (locked._My_flag > 0) {
#else
            // libstdc++'s internal flag member; spin on a plain read
            while (locked._M_i) {
#endif
#endif
            // Yield every few rounds in case the lock is held for a longer
            // time, which is usually not the case.
            if (round++ % 8 == 0)
                std::this_thread::yield();
#if false
            }
#endif
        }
    }
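
    // A portable sketch of the disabled fast path above (an assumption, not
    // part of the original): C++20 adds atomic_flag::test(), which lets the
    // waiter spin on a plain load instead of hammering the flag with
    // test_and_set, a read-modify-write that bounces the cache line:
    //
    //   while (locked.test_and_set(std::memory_order_acquire))
    //       while (locked.test(std::memory_order_relaxed))
    //           std::this_thread::yield();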

    // test_and_set() returns the previous value, so false means the flag was
    // clear and the lock is now ours.
    inline bool try_lock() {
        return !locked.test_and_set(std::memory_order_acquire);
    }

    // memory_order_release pairs with the acquire in lock()/try_lock(), making
    // writes from the critical section visible to the next owner.
    inline void unlock() {
        locked.clear(std::memory_order_release);
    }
};
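
// A minimal usage sketch (an assumption, not part of the original header):
// lock(), try_lock() and unlock() satisfy the standard Lockable requirements,
// so spin_lock composes with the usual RAII guards. The names g_lock and
// do_work are illustrative only:
//
//   #include <mutex>
//
//   spin_lock g_lock;
//
//   void do_work() {
//       std::lock_guard<spin_lock> guard{g_lock}; // spins until acquired
//       // ... critical section ...
//   }                                             // released on scope exit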