wip static-lib

gabime 2019-05-08 17:16:56 +03:00
parent 29c949ab03
commit 5d0eb6dda5
7 changed files with 186 additions and 133 deletions

Binary file not shown.


@@ -68,9 +68,9 @@ size_t thread_id() SPDLOG_NOEXCEPT;
// See https://github.com/gabime/spdlog/issues/609
void sleep_for_millis(int milliseconds) SPDLOG_NOEXCEPT;
std::string filename_to_str(const filename_t &filename);
std::string filename_to_str(const filename_t &filename) SPDLOG_NOEXCEPT;
int pid();
int pid() SPDLOG_NOEXCEPT;
// Determine if the terminal supports colors
// Source: https://github.com/agauniyal/rang/
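
This hunk adds SPDLOG_NOEXCEPT (presumably a thin wrapper around noexcept) to the declarations of filename_to_str() and pid(); the matching SPDLOG_INLINE definitions further down get the same treatment, because a declaration and its out-of-line definition must agree on the exception specification. A minimal illustration with a hypothetical function, not spdlog code:

// Hypothetical example: the noexcept specification has to appear on both
// the declaration and the out-of-line definition, or the program is ill-formed.
int pid_like() noexcept;   // header declaration

int pid_like() noexcept    // definition must repeat noexcept
{
    return 42;
}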


@@ -23,43 +23,12 @@ namespace details {
class periodic_worker
{
public:
periodic_worker(const std::function<void()> &callback_fun, std::chrono::seconds interval)
{
active_ = (interval > std::chrono::seconds::zero());
if (!active_)
{
return;
}
worker_thread_ = std::thread([this, callback_fun, interval]() {
for (;;)
{
std::unique_lock<std::mutex> lock(this->mutex_);
if (this->cv_.wait_for(lock, interval, [this] { return !this->active_; }))
{
return; // active_ == false, so exit this thread
}
callback_fun();
}
});
}
periodic_worker(const std::function<void()> &callback_fun, std::chrono::seconds interval);
periodic_worker(const periodic_worker &) = delete;
periodic_worker &operator=(const periodic_worker &) = delete;
// stop the worker thread and join it
~periodic_worker()
{
if (worker_thread_.joinable())
{
{
std::lock_guard<std::mutex> lock(mutex_);
active_ = false;
}
cv_.notify_one();
worker_thread_.join();
}
}
~periodic_worker();
private:
bool active_;
@@ -69,3 +38,8 @@ private:
};
} // namespace details
} // namespace spdlog
#ifndef SPDLOG_STATIC_LIB
#include "spdlog/impl/periodic_worker.cpp"
#endif
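
The shape of the change: periodic_worker.h keeps only declarations, the bodies move to spdlog/impl/periodic_worker.cpp, and that .cpp is textually included at the bottom of the header unless SPDLOG_STATIC_LIB is defined, in which case it is expected to be compiled once into the library. Below is a minimal two-file sketch of the same split with hypothetical mylib/widget names; the exact expansion of SPDLOG_INLINE (inline for header-only builds, empty for the static lib) is an assumption, not something visible in this diff.

// mylib/widget.h -- declarations only (hypothetical names)
#pragma once

#ifdef MYLIB_STATIC_LIB
#    define MYLIB_INLINE              // compiled into the library, no inline needed
#else
#    define MYLIB_INLINE inline       // header-only: definitions live in every TU
#endif

namespace mylib {
class widget
{
public:
    widget();
    void poke();
};
} // namespace mylib

#ifndef MYLIB_STATIC_LIB
#    include "mylib/impl/widget.cpp"  // header-only build pulls the bodies in here
#endif

// mylib/impl/widget.cpp -- bodies, compiled separately only for the static lib
#ifdef MYLIB_STATIC_LIB
#    include "mylib/widget.h"
#endif

MYLIB_INLINE mylib::widget::widget() = default;

MYLIB_INLINE void mylib::widget::poke()
{
    // real work would go here
}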


@@ -123,118 +123,36 @@ public:
using item_type = async_msg;
using q_type = details::mpmc_blocking_queue<item_type>;
thread_pool(size_t q_max_items, size_t threads_n)
: q_(q_max_items)
{
// std::cout << "thread_pool() q_size_bytes: " << q_size_bytes <<
// "\tthreads_n: " << threads_n << std::endl;
if (threads_n == 0 || threads_n > 1000)
{
throw spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid "
"range is 1-1000)");
}
for (size_t i = 0; i < threads_n; i++)
{
threads_.emplace_back(&thread_pool::worker_loop_, this);
}
}
thread_pool(size_t q_max_items, size_t threads_n);
// message all threads to terminate gracefully and join them
~thread_pool()
{
try
{
for (size_t i = 0; i < threads_.size(); i++)
{
post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
}
~thread_pool();
for (auto &t : threads_)
{
t.join();
}
}
catch (...)
{
}
}
thread_pool(const thread_pool &) = delete;
thread_pool &operator=(thread_pool &&) = delete;
void post_log(async_logger_ptr &&worker_ptr, details::log_msg &msg, async_overflow_policy overflow_policy)
{
async_msg async_m(std::move(worker_ptr), async_msg_type::log, msg);
post_async_msg_(std::move(async_m), overflow_policy);
}
void post_log(async_logger_ptr &&worker_ptr, details::log_msg &msg, async_overflow_policy overflow_policy);
void post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy);
size_t overrun_counter();
void post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
{
post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
}
size_t overrun_counter()
{
return q_.overrun_counter();
}
private:
q_type q_;
std::vector<std::thread> threads_;
void post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
{
if (overflow_policy == async_overflow_policy::block)
{
q_.enqueue(std::move(new_msg));
}
else
{
q_.enqueue_nowait(std::move(new_msg));
}
}
void worker_loop_()
{
while (process_next_msg_()) {};
}
void post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy);
void worker_loop_();
// process next message in the queue
// return true if this thread should still be active (i.e. no terminate msg
// was received)
bool process_next_msg_()
{
async_msg incoming_async_msg;
bool dequeued = q_.dequeue_for(incoming_async_msg, std::chrono::seconds(10));
if (!dequeued)
{
return true;
}
switch (incoming_async_msg.msg_type)
{
case async_msg_type::log:
{
auto msg = incoming_async_msg.to_log_msg();
incoming_async_msg.worker_ptr->backend_log_(msg);
return true;
}
case async_msg_type::flush:
{
incoming_async_msg.worker_ptr->backend_flush_();
return true;
}
case async_msg_type::terminate:
{
return false;
}
}
assert(false && "Unexpected async_msg_type");
return true;
}
bool process_next_msg_();
};
} // namespace details
} // namespace spdlog
#ifndef SPDLOG_STATIC_LIB
#include "spdlog/impl/thread_pool.cpp"
#endif
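
The destructor declared here (and defined in spdlog/impl/thread_pool.cpp below) shuts the pool down by posting one terminate message per worker thread with the blocking overflow policy and then joining every thread, so each worker consumes exactly one terminate and exits. A condensed, self-contained sketch of that protocol; the mini_pool below is a hypothetical stand-in using std::queue, not spdlog's mpmc_blocking_queue:

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Hypothetical mini pool illustrating the shutdown protocol:
// post one terminate message per worker, then join them all.
class mini_pool
{
public:
    struct msg
    {
        enum kind { work, terminate } type;
    };

    explicit mini_pool(std::size_t threads_n)
    {
        for (std::size_t i = 0; i < threads_n; i++)
        {
            threads_.emplace_back([this] { worker_loop_(); });
        }
    }

    ~mini_pool()
    {
        for (std::size_t i = 0; i < threads_.size(); i++)
        {
            post(msg{msg::terminate}); // one terminate per worker
        }
        for (auto &t : threads_)
        {
            t.join();
        }
    }

    void post(msg m)
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            q_.push(m);
        }
        cv_.notify_one();
    }

private:
    void worker_loop_()
    {
        for (;;)
        {
            std::unique_lock<std::mutex> lock(mutex_);
            cv_.wait(lock, [this] { return !q_.empty(); });
            msg m = q_.front();
            q_.pop();
            lock.unlock();
            if (m.type == msg::terminate)
            {
                return; // each terminate stops exactly one worker
            }
            // a real work item would be handled here
        }
    }

    std::queue<msg> q_;
    std::mutex mutex_;
    std::condition_variable cv_;
    std::vector<std::thread> threads_;
};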


@@ -340,19 +340,19 @@ SPDLOG_INLINE void sleep_for_millis(int milliseconds) SPDLOG_NOEXCEPT
// wchar support for windows file names (SPDLOG_WCHAR_FILENAMES must be defined)
#if defined(_WIN32) && defined(SPDLOG_WCHAR_FILENAMES)
SPDLOG_INLINE std::string filename_to_str(const filename_t &filename)
SPDLOG_INLINE std::string filename_to_str(const filename_t &filename) SPDLOG_NOEXCEPT
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> c;
return c.to_bytes(filename);
}
#else
SPDLOG_INLINE std::string filename_to_str(const filename_t &filename)
SPDLOG_INLINE std::string filename_to_str(const filename_t &filename) SPDLOG_NOEXCEPT
{
return filename;
}
#endif
SPDLOG_INLINE int pid()
SPDLOG_INLINE int pid() SPDLOG_NOEXCEPT
{
#ifdef _WIN32


@@ -0,0 +1,45 @@
//
// Created by gabi on 5/8/19.
//
#ifdef SPDLOG_STATIC_LIB
#include "spdlog/details/periodic_worker.h"
#endif
#include "spdlog/common.h"
SPDLOG_INLINE spdlog::details::periodic_worker::periodic_worker(const std::function<void()> &callback_fun, std::chrono::seconds interval)
{
active_ = (interval > std::chrono::seconds::zero());
if (!active_)
{
return;
}
worker_thread_ = std::thread([this, callback_fun, interval]() {
for (;;)
{
std::unique_lock<std::mutex> lock(this->mutex_);
if (this->cv_.wait_for(lock, interval, [this] { return !this->active_; }))
{
return; // active_ == false, so exit this thread
}
callback_fun();
}
});
}
// stop the worker thread and join it
SPDLOG_INLINE spdlog::details::periodic_worker::~periodic_worker()
{
if (worker_thread_.joinable())
{
{
std::lock_guard<std::mutex> lock(mutex_);
active_ = false;
}
cv_.notify_one();
worker_thread_.join();
}
}
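
The wait_for call above doubles as both the periodic sleep and the stop signal: it returns early only when the predicate sees active_ == false, which the destructor sets under the mutex before notifying and joining. A hedged usage sketch; driving a periodic flush this way is an assumption about how the class is used elsewhere, not something shown in this diff:

#include <chrono>
#include <thread>
#include "spdlog/details/periodic_worker.h"

int main()
{
    // Run the callback every 5 seconds until the worker goes out of scope.
    spdlog::details::periodic_worker ticker([] { /* e.g. flush sinks */ }, std::chrono::seconds(5));

    std::this_thread::sleep_for(std::chrono::seconds(30));
    // ~periodic_worker(): set active_ = false under the mutex, notify, join.
    return 0;
}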


@@ -0,0 +1,116 @@
#ifdef SPDLOG_STATIC_LIB
#include "spdlog/async_logger.h"
#include "spdlog/details/thread_pool.h"
template class spdlog::details::mpmc_blocking_queue<spdlog::details::async_msg>;
#endif
#include "spdlog/common.h"
SPDLOG_INLINE spdlog::details::thread_pool::thread_pool(size_t q_max_items, size_t threads_n) : q_(q_max_items)
{
// std::cout << "thread_pool() q_size_bytes: " << q_size_bytes <<
// "\tthreads_n: " << threads_n << std::endl;
if (threads_n == 0 || threads_n > 1000)
{
throw spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid "
"range is 1-1000)");
}
for (size_t i = 0; i < threads_n; i++)
{
threads_.emplace_back(&thread_pool::worker_loop_, this);
}
}
// message all threads to terminate gracefully and join them
SPDLOG_INLINE spdlog::details::thread_pool::~thread_pool()
{
try
{
for (size_t i = 0; i < threads_.size(); i++)
{
post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
}
for (auto &t : threads_)
{
t.join();
}
}
catch (...)
{
}
}
void SPDLOG_INLINE spdlog::details::thread_pool::post_log(async_logger_ptr &&worker_ptr, details::log_msg &msg, async_overflow_policy overflow_policy)
{
async_msg async_m(std::move(worker_ptr), async_msg_type::log, msg);
post_async_msg_(std::move(async_m), overflow_policy);
}
void SPDLOG_INLINE spdlog::details::thread_pool::post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
{
post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
}
size_t SPDLOG_INLINE spdlog::details::thread_pool::overrun_counter()
{
return q_.overrun_counter();
}
void SPDLOG_INLINE spdlog::details::thread_pool::post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
{
if (overflow_policy == async_overflow_policy::block)
{
q_.enqueue(std::move(new_msg));
}
else
{
q_.enqueue_nowait(std::move(new_msg));
}
}
void SPDLOG_INLINE spdlog::details::thread_pool::worker_loop_()
{
while (process_next_msg_()) {};
}
// process next message in the queue
// return true if this thread should still be active (i.e. no terminate msg
// was received)
bool SPDLOG_INLINE spdlog::details::thread_pool::process_next_msg_()
{
async_msg incoming_async_msg;
bool dequeued = q_.dequeue_for(incoming_async_msg, std::chrono::seconds(10));
if (!dequeued)
{
return true;
}
switch (incoming_async_msg.msg_type)
{
case async_msg_type::log:
{
auto msg = incoming_async_msg.to_log_msg();
incoming_async_msg.worker_ptr->backend_log_(msg);
return true;
}
case async_msg_type::flush:
{
incoming_async_msg.worker_ptr->backend_flush_();
return true;
}
case async_msg_type::terminate:
{
return false;
}
}
assert(false && "Unexpected async_msg_type");
return true;
}
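
The SPDLOG_STATIC_LIB branch at the top of this file explicitly instantiates mpmc_blocking_queue<async_msg> so the compiled library carries that template's definitions. A generic sketch of the technique with hypothetical names; pairing it with an extern template declaration in the header is the usual completion of this pattern, but it is an assumption here, not something shown in the diff:

#include <utility>
#include <vector>

// ring.h (hypothetical)
template<typename T>
class ring
{
public:
    void push(T v)
    {
        items_.push_back(std::move(v));
    }

private:
    std::vector<T> items_;
};

#ifdef MYLIB_STATIC_LIB
extern template class ring<int>; // header: don't instantiate in every user TU
#endif

// ring.cpp, compiled only into the static lib
#ifdef MYLIB_STATIC_LIB
template class ring<int>;        // the one explicit instantiation, analogous to
                                 // mpmc_blocking_queue<async_msg> above
#endif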