Fixed the blue light stuck issue
This commit is contained in:
parent aa19e52978
commit 418e242e42
@@ -1 +1 @@
-Subproject commit ca5350f32137268f13811961d84f4fd96639ae67
+Subproject commit 35052680fe58de01102dfd4b65835df08c2a7f20
@@ -145,7 +145,7 @@ function initializeIpc() {
 }
 
 const localVersion = await currentClientVersion();
-const updateAvailable = !localVersion.isDevelopmentVersion() && (result.version.newerThan(localVersion) || result.channel !== clientAppInfo().clientChannel);
+const updateAvailable = !localVersion.isDevelopmentVersion() && (result.version.newerThan(localVersion, true) || result.channel !== clientAppInfo().clientChannel);
 targetRemoteVersion = updateAvailable ? result : undefined;
 
 windowInstance?.webContents.send("client-updater-remote-status",
@@ -42,7 +42,15 @@ export class Version {
 return other.timestamp == this.timestamp;
 }
 
-newerThan(other: Version) : boolean {
+newerThan(other: Version, compareTimestamps?: boolean) : boolean {
+if(other.timestamp > 0 && this.timestamp > 0 && typeof compareTimestamps === "boolean" && compareTimestamps) {
+if(other.timestamp > this.timestamp) {
+return false;
+} else if(other.timestamp < this.timestamp) {
+return true;
+}
+}
+
 if(other.major > this.major) return false;
 else if(other.major < this.major) return true;
 
@@ -17,9 +17,9 @@ namespace tc {
 virtual void event_execute_dropped(const std::chrono::system_clock::time_point& /* scheduled timestamp */) {}
 
 std::unique_lock<std::timed_mutex> execute_lock(bool force) {
-if(force)
-return std::unique_lock<std::timed_mutex>(this->_execute_mutex);
-else {
+if(force) {
+return std::unique_lock<std::timed_mutex>(this->_execute_mutex);
+} else {
 auto lock = std::unique_lock<std::timed_mutex>(this->_execute_mutex, std::defer_lock);
 if(this->execute_lock_timeout.count() > 0) {
 (void) lock.try_lock_for(this->execute_lock_timeout);
@@ -14,7 +14,7 @@ using namespace tc::audio;
 void AudioOutputSource::clear() {
 std::lock_guard buffer_lock{this->buffer_mutex};
 this->buffer.clear();
-this->buffer_state = buffer_state::buffering;
+this->buffer_state = BufferState::buffering;
 this->fadeout_samples_left = 0;
 }
 
@@ -26,7 +26,7 @@ void AudioOutputSource::apply_fadeout() {
 return;
 }
 
-const auto sample_byte_size = this->channel_count * sizeof(float) * fade_samples;
+const auto sample_byte_size = this->channel_count_ * sizeof(float) * fade_samples;
 assert(this->buffer.fill_count() >= sample_byte_size);
 auto write_ptr = (float*) ((char*) this->buffer.read_ptr() + (this->buffer.fill_count() - sample_byte_size));
 
@@ -34,7 +34,7 @@ void AudioOutputSource::apply_fadeout() {
 const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
 const auto volume = std::min(log10f(offset) / -2.71828182845904f, 1.f);
 
-for(int channel{0}; channel < this->channel_count; channel++) {
+for(int channel{0}; channel < this->channel_count_; channel++) {
 *write_ptr++ *= volume;
 }
 }
@@ -54,12 +54,12 @@ void AudioOutputSource::apply_fadein() {
 * Note: We're using the read_ptr() here in order to correctly apply the effect.
 * This isn't really best practice but works.
 */
-auto write_ptr = (float*) this->buffer.read_ptr() + this->fadeout_samples_left * this->channel_count;
+auto write_ptr = (float*) this->buffer.read_ptr() + this->fadeout_samples_left * this->channel_count_;
 for(size_t index{0}; index < fade_samples; index++) {
 const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
 const auto volume = std::min(log10f(1 - offset) / -2.71828182845904f, 1.f);
 
-for(int channel{0}; channel < this->channel_count; channel++) {
+for(int channel{0}; channel < this->channel_count_; channel++) {
 *write_ptr++ *= volume;
 }
 }
@@ -78,27 +78,27 @@ bool AudioOutputSource::pop_samples(void *target_buffer, size_t target_sample_co
 
 bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_count) {
 switch(this->buffer_state) {
-case buffer_state::fadeout: {
+case BufferState::fadeout: {
 /* Write as much we can */
 const auto write_samples = std::min(this->fadeout_samples_left, target_sample_count);
-const auto write_byte_size = write_samples * this->channel_count * sizeof(float);
+const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
 memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
 this->buffer.advance_read_ptr(write_byte_size);
 
 /* Fill the rest with silence */
 const auto empty_samples = target_sample_count - write_samples;
-const auto empty_byte_size = empty_samples * this->channel_count * sizeof(float);
+const auto empty_byte_size = empty_samples * this->channel_count_ * sizeof(float);
 memset((char*) target_buffer + write_byte_size, 0, empty_byte_size);
 
 this->fadeout_samples_left -= write_samples;
 if(!this->fadeout_samples_left) {
 log_trace(category::audio, tr("{} Successfully replayed fadeout sequence."), (void*) this);
-this->buffer_state = buffer_state::buffering;
+this->buffer_state = BufferState::buffering;
 }
 return true;
 }
 
-case buffer_state::playing: {
+case BufferState::playing: {
 const auto buffered_samples = this->currently_buffered_samples();
 if(buffered_samples < target_sample_count + this->fadeout_frame_samples_) {
 const auto missing_samples = target_sample_count + this->fadeout_frame_samples_ - buffered_samples;
@@ -118,7 +118,7 @@ bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_c
 /* Write the rest of unmodified buffer */
 const auto write_samples = buffered_samples - this->fadeout_samples_left;
 assert(write_samples <= target_sample_count);
-const auto write_byte_size = write_samples * this->channel_count * sizeof(float);
+const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
 memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
 this->buffer.advance_read_ptr(write_byte_size);
 
@@ -126,14 +126,14 @@ bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_c
 (void*) this, target_sample_count, buffered_samples, this->fadeout_frame_samples_, write_samples
 );
 
-this->buffer_state = buffer_state::fadeout;
+this->buffer_state = BufferState::fadeout;
 if(write_samples < target_sample_count) {
 /* Fill the rest of the buffer with the fadeout content */
 this->pop_samples((char*) target_buffer + write_byte_size, target_sample_count - write_samples);
 }
 } else {
 /* We can just normally copy the buffer */
-const auto write_byte_size = target_sample_count * this->channel_count * sizeof(float);
+const auto write_byte_size = target_sample_count * this->channel_count_ * sizeof(float);
 memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
 this->buffer.advance_read_ptr(write_byte_size);
 }
@@ -141,7 +141,7 @@ bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_c
 return true;
 }
 
-case buffer_state::buffering:
+case BufferState::buffering:
 /* Nothing to replay */
 return false;
 
@@ -158,13 +158,13 @@ ssize_t AudioOutputSource::enqueue_samples(const void *source_buffer, size_t sam
 
 ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sample_count) {
 switch(this->buffer_state) {
-case buffer_state::fadeout:
-case buffer_state::buffering: {
+case BufferState::fadeout:
+case BufferState::buffering: {
 assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
 assert(this->min_buffered_samples_ >= this->currently_buffered_samples() - this->fadeout_samples_left);
 const auto missing_samples = this->min_buffered_samples_ - (this->currently_buffered_samples() - this->fadeout_samples_left);
 const auto write_sample_count = std::min(missing_samples, sample_count);
-const auto write_byte_size = write_sample_count * this->channel_count * sizeof(float);
+const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
 
 assert(write_sample_count <= this->max_supported_buffering());
 memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
@@ -184,7 +184,7 @@ ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sa
 /* buffering finished */
 log_trace(category::audio, tr("{} Finished buffering {} samples. Fading them in."), (void*) this, this->min_buffered_samples_);
 this->apply_fadein();
-this->buffer_state = buffer_state::playing;
+this->buffer_state = BufferState::playing;
 if(sample_count > missing_samples) {
 /* we've more data to write */
 return this->enqueue_samples((const char*) source_buffer + write_byte_size, sample_count - missing_samples) + write_sample_count;
@@ -193,11 +193,11 @@ ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sa
 }
 }
 
-case buffer_state::playing: {
+case BufferState::playing: {
 const auto buffered_samples = this->currently_buffered_samples();
 
 const auto write_sample_count = std::min(this->max_supported_buffering() - buffered_samples, sample_count);
-const auto write_byte_size = write_sample_count * this->channel_count * sizeof(float);
+const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
 
 memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
 this->buffer.advance_write_ptr(write_byte_size);
@@ -208,19 +208,19 @@ ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sa
 }
 
 switch (this->overflow_strategy) {
-case overflow_strategy::discard_input:
+case OverflowStrategy::discard_input:
 return -2;
 
-case overflow_strategy::discard_buffer_all:
+case OverflowStrategy::discard_buffer_all:
 this->buffer.clear();
 break;
 
-case overflow_strategy::discard_buffer_half:
+case OverflowStrategy::discard_buffer_half:
 /* FIXME: This implementation is wrong! */
 this->buffer.advance_read_ptr(this->buffer.fill_count() / 2);
 break;
 
-case overflow_strategy::ignore:
+case OverflowStrategy::ignore:
 break;
 }
 }
@@ -236,10 +236,10 @@ ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sa
 
 constexpr static auto kMaxStackBuffer{1024 * 8 * sizeof(float)};
 ssize_t AudioOutputSource::enqueue_samples_no_interleave(const void *source_buffer, size_t samples) {
-if(this->channel_count == 1) {
+if(this->channel_count_ == 1) {
 return this->enqueue_samples(source_buffer, samples);
-} else if(this->channel_count == 2) {
-const auto buffer_byte_size = samples * this->channel_count * sizeof(float);
+} else if(this->channel_count_ == 2) {
+const auto buffer_byte_size = samples * this->channel_count_ * sizeof(float);
 if(buffer_byte_size > kMaxStackBuffer) {
 /* We can't convert to interleave */
 return 0;
@@ -292,20 +292,20 @@ bool AudioOutputSource::set_min_buffered_samples(size_t samples) {
 
 this->min_buffered_samples_ = samples;
 switch(this->buffer_state) {
-case buffer_state::fadeout:
-case buffer_state::buffering: {
+case BufferState::fadeout:
+case BufferState::buffering: {
 assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
 const auto buffered_samples = this->currently_buffered_samples() - this->fadeout_samples_left;
 if(buffered_samples > this->min_buffered_samples_) {
 log_trace(category::audio, tr("{} Finished buffering {} samples (due to min buffered sample reduce). Fading them in."), (void*) this, this->min_buffered_samples_);
 this->apply_fadein();
-this->buffer_state = buffer_state::playing;
+this->buffer_state = BufferState::playing;
 }
 
 return true;
 }
 
-case buffer_state::playing:
+case BufferState::playing:
 return true;
 
 default:
@@ -17,30 +17,38 @@ namespace tc::audio {
 class AudioOutput;
 class AudioResampler;
 
-namespace overflow_strategy {
-enum value {
-ignore,
-discard_buffer_all,
-discard_buffer_half,
-discard_input
-};
-}
+enum struct OverflowStrategy {
+ignore,
+discard_buffer_all,
+discard_buffer_half,
+discard_input
+};
 
 class AudioOutputSource {
 friend class AudioOutput;
 public:
-size_t const channel_count;
-size_t const sample_rate;
+enum struct BufferState {
+/* Awaiting enough samples to replay and apply the fadein effect */
+buffering,
+/* We have encountered a buffer underflow. Applying fadeout effect and changing state to buffering. */
+fadeout,
+/* We're just normally replaying audio */
+playing
+};
 
-/**
-* The maximum amount of samples which could be buffered.
-* @return
-*/
-[[nodiscard]] inline size_t max_supported_buffering() const {
-return this->buffer.capacity() / this->channel_count / sizeof(float);
+[[nodiscard]] inline auto channel_count() const -> size_t { return this->channel_count_; }
+[[nodiscard]] inline auto sample_rate() const -> size_t { return this->sample_rate_; }
+[[nodiscard]] inline auto state() const -> BufferState { return this->buffer_state; }
+
+/**
+* The maximum amount of samples which could be buffered.
+* @return
+*/
+[[nodiscard]] inline auto max_supported_buffering() const -> size_t {
+return this->buffer.capacity() / this->channel_count_ / sizeof(float);
 }
 
-[[nodiscard]] inline size_t max_buffering() const {
+[[nodiscard]] inline auto max_buffering() const -> size_t {
 const auto max_samples = this->max_supported_buffering();
 if(this->max_buffered_samples_ && this->max_buffered_samples_ <= max_samples) {
 return this->max_buffered_samples_;
@@ -54,7 +62,7 @@ namespace tc::audio {
 * @return
 */
 [[nodiscard]] inline size_t currently_buffered_samples() const {
-return this->buffer.fill_count() / this->channel_count / sizeof(float);
+return this->buffer.fill_count() / this->channel_count_ / sizeof(float);
 }
 
 
@@ -64,7 +72,7 @@ namespace tc::audio {
 bool set_min_buffered_samples(size_t /* target samples */);
 bool set_max_buffered_samples(size_t /* target samples */);
 
-overflow_strategy::value overflow_strategy{overflow_strategy::discard_buffer_half};
+OverflowStrategy overflow_strategy{OverflowStrategy::discard_buffer_half};
 
 /* if it returns true then the it means that the buffer has been refilled, we have to test again */
 std::function<bool(size_t /* sample count */)> on_underflow;
@@ -78,24 +86,18 @@ namespace tc::audio {
 /* Consume N samples */
 bool pop_samples(void* /* output buffer */, size_t /* sample count */);
 private:
-enum struct buffer_state {
-/* Awaiting enough samples to replay and apply the fadein effect */
-buffering,
-/* We have encountered a buffer underflow. Applying fadeout effect and changing state to buffering. */
-fadeout,
-/* We're just normally replaying audio */
-playing
-};
 
 AudioOutputSource(size_t channel_count, size_t sample_rate, ssize_t max_buffer_sample_count = -1) :
-channel_count{channel_count}, sample_rate{sample_rate},
-buffer{max_buffer_sample_count == -1 ? channel_count * sample_rate * sizeof(float) : max_buffer_sample_count * channel_count * sizeof(float)}
+channel_count_{channel_count}, sample_rate_{sample_rate},
+buffer{max_buffer_sample_count == -1 ? channel_count * sample_rate * sizeof(float) : max_buffer_sample_count * channel_count * sizeof(float)}
 {
 this->clear();
 }
 
+size_t const channel_count_;
+size_t const sample_rate_;
+
 std::recursive_mutex buffer_mutex{};
-enum buffer_state buffer_state{buffer_state::buffering};
+BufferState buffer_state{BufferState::buffering};
 tc::ring_buffer buffer;
 
 size_t min_buffered_samples_{0};
@@ -5,13 +5,13 @@
 using namespace std;
 using namespace tc::audio;
 
-AudioResampler::AudioResampler(size_t irate, size_t orate, size_t channels) : _input_rate{irate}, _output_rate{orate}, _channels{channels} {
+AudioResampler::AudioResampler(size_t irate, size_t orate, size_t channels) : input_rate_{irate}, output_rate_{orate}, channels_{channels} {
 if(this->input_rate() != this->output_rate()) {
 soxr_error_t error;
-this->soxr_handle = soxr_create((double) this->_input_rate, (double) this->_output_rate, (unsigned) this->_channels, &error, nullptr, nullptr, nullptr);
+this->soxr_handle = soxr_create((double) this->input_rate_, (double) this->output_rate_, (unsigned) this->channels_, &error, nullptr, nullptr, nullptr);
 
 if(!this->soxr_handle) {
-log_error(category::audio, tr("Failed to create soxr resampler: {}. Input: {}; Output: {}; Channels: {}"), error, this->_input_rate, this->_output_rate, this->_channels);
+log_error(category::audio, tr("Failed to create soxr resampler: {}. Input: {}; Output: {}; Channels: {}"), error, this->input_rate_, this->output_rate_, this->channels_);
 }
 }
 }
@@ -23,13 +23,16 @@ AudioResampler::~AudioResampler() {
 
 ssize_t AudioResampler::process(void *output, const void *input, size_t input_length) {
 if(this->io_ratio() == 1) {
-if(input != output)
-memcpy(output, input, input_length * this->_channels * 4);
+if(input != output) {
+memcpy(output, input, input_length * this->channels_ * 4);
+}
 
 return input_length;
 }
-if(!this->soxr_handle)
-return -2;
+
+if(!this->soxr_handle) {
+return -2;
+}
 
 size_t output_length = 0;
 auto error = soxr_process(this->soxr_handle, input, input_length, nullptr, output, this->estimated_output_size(input_length), &output_length);
@@ -16,27 +16,27 @@ namespace tc::audio {
 AudioResampler(size_t /* input rate */, size_t /* output rate */, size_t /* channels */);
 virtual ~AudioResampler();
 
-[[nodiscard]] inline size_t channels() const { return this->_channels; }
-[[nodiscard]] inline size_t input_rate() const { return this->_input_rate; }
-[[nodiscard]] inline size_t output_rate() const { return this->_output_rate; }
+[[nodiscard]] inline size_t channels() const { return this->channels_; }
+[[nodiscard]] inline size_t input_rate() const { return this->input_rate_; }
+[[nodiscard]] inline size_t output_rate() const { return this->output_rate_; }
 
-[[nodiscard]] inline long double io_ratio() const { return (long double) this->_output_rate / (long double) this->_input_rate; }
+[[nodiscard]] inline long double io_ratio() const { return (long double) this->output_rate_ / (long double) this->input_rate_; }
 [[nodiscard]] inline size_t estimated_output_size(size_t input_length) {
 if(!this->soxr_handle) return input_length; /* no resembling needed */
 return (size_t) ceill(this->io_ratio() * input_length + *soxr_num_clips(this->soxr_handle)) + 1;
 }
 [[nodiscard]] inline size_t input_size(size_t output_length) const {
-return (size_t) ceill((long double) this->_input_rate / (long double) this->_output_rate * output_length);
+return (size_t) ceill((long double) this->input_rate_ / (long double) this->output_rate_ * output_length);
 }
 
 [[nodiscard]] inline bool valid() { return this->io_ratio() == 1 || this->soxr_handle != nullptr; }
 
 [[nodiscard]] ssize_t process(void* /* output */, const void* /* input */, size_t /* input length */);
 private:
-size_t const _channels = 0;
-size_t const _input_rate = 0;
-size_t const _output_rate = 0;
+size_t const channels_{0};
+size_t const input_rate_{0};
+size_t const output_rate_{0};
 
-soxr_t soxr_handle = nullptr;
+soxr_t soxr_handle{nullptr};
 };
 }
@@ -6,11 +6,12 @@ using namespace tc;
 using namespace tc::audio;
 
 std::shared_ptr<SampleBuffer> SampleBuffer::allocate(uint8_t channels, uint16_t samples) {
-auto _buffer = (SampleBuffer*) malloc(SampleBuffer::HEAD_LENGTH + channels * samples * 4);
-if(!_buffer)
-return nullptr;
+auto buffer = (SampleBuffer*) malloc(sizeof(SampleBuffer) + channels * samples * 4);
+if(!buffer) {
+return nullptr;
+}
 
-_buffer->sample_size = samples;
-_buffer->sample_index = 0;
-return shared_ptr<SampleBuffer>(_buffer, ::free);
+buffer->sample_size = samples;
+buffer->sample_index = 0;
+return shared_ptr<SampleBuffer>(buffer, ::free);
 }
 
@@ -4,34 +4,20 @@
 #include <memory>
 
 namespace tc::audio {
-#ifdef WIN32
-#pragma pack(push,1)
-#define __attribute__packed_1
-#else
-#define __attribute__packed_1 __attribute__((packed, aligned(1)))
-#endif
 /* Every sample is a float (4byte) */
-struct __attribute__packed_1 SampleBuffer {
-static constexpr size_t HEAD_LENGTH = 4;
+struct SampleBuffer {
+static std::shared_ptr<SampleBuffer> allocate(uint8_t /* channels */, uint16_t /* samples */);
 
 uint16_t sample_size;
 uint16_t sample_index;
 
 char sample_data[
+/* windows does not allow zero sized arrays */
 #ifndef WIN32
 0
 #else
-1 /* windows does not allow zero sized arrays */
+1
 #endif
 ];
 
-static std::shared_ptr<SampleBuffer> allocate(uint8_t /* channels */, uint16_t /* samples */);
 };
 
-#ifndef WIN32
-static_assert(sizeof(SampleBuffer) == 4, "Invalid SampleBuffer packaging!");
-#else
-#pragma pack(pop)
-static_assert(sizeof(SampleBuffer) == 5, "Invalid SampleBuffer packaging!");
-#endif
 }
@@ -67,8 +67,8 @@ void AudioOutputStreamWrapper::do_wrap(const v8::Local<v8::Object> &obj) {
 return;
 }
 
-Nan::DefineOwnProperty(this->handle(), Nan::New<v8::String>("sample_rate").ToLocalChecked(), Nan::New<v8::Number>((uint32_t) handle->sample_rate), v8::ReadOnly);
-Nan::DefineOwnProperty(this->handle(), Nan::New<v8::String>("channels").ToLocalChecked(), Nan::New<v8::Number>((uint32_t) handle->channel_count), v8::ReadOnly);
+Nan::DefineOwnProperty(this->handle(), Nan::New<v8::String>("sample_rate").ToLocalChecked(), Nan::New<v8::Number>((uint32_t) handle->sample_rate()), v8::ReadOnly);
+Nan::DefineOwnProperty(this->handle(), Nan::New<v8::String>("channels").ToLocalChecked(), Nan::New<v8::Number>((uint32_t) handle->channel_count()), v8::ReadOnly);
 
 if(this->_own_handle) {
 this->call_underflow = Nan::async_callback([&]{
@@ -141,12 +141,12 @@ NAN_METHOD(AudioOutputStreamWrapper::_write_data) {
 auto interleaved = info[1]->BooleanValue(info.GetIsolate());
 auto js_buffer = info[0].As<v8::ArrayBuffer>()->GetContents();
 
-if(js_buffer.ByteLength() % (handle->channel_count * 4) != 0) {
+if(js_buffer.ByteLength() % (handle->channel_count() * 4) != 0) {
 Nan::ThrowError("input buffer invalid size");
 return;
 }
 
-auto samples = js_buffer.ByteLength() / handle->channel_count / 4;
+auto samples = js_buffer.ByteLength() / handle->channel_count() / 4;
 info.GetReturnValue().Set((int32_t) write_data(handle, js_buffer.Data(), samples, interleaved));
 }
 
@@ -168,12 +168,12 @@ NAN_METHOD(AudioOutputStreamWrapper::_write_data_rated) {
 auto interleaved = info[1]->BooleanValue(info.GetIsolate());
 auto js_buffer = info[0].As<v8::ArrayBuffer>()->GetContents();
 
-auto samples = js_buffer.ByteLength() / handle->channel_count / 4;
-if(sample_rate == handle->sample_rate) {
+auto samples = js_buffer.ByteLength() / handle->channel_count() / 4;
+if(sample_rate == handle->sample_rate()) {
 info.GetReturnValue().Set((int32_t) write_data(handle, js_buffer.Data(), samples, interleaved));
 } else {
 if(!client->_resampler || client->_resampler->input_rate() != sample_rate)
-client->_resampler = make_unique<AudioResampler>((size_t) sample_rate, handle->sample_rate, handle->channel_count);
+client->_resampler = make_unique<AudioResampler>((size_t) sample_rate, handle->sample_rate(), handle->channel_count());
 
 if(!client->_resampler || !client->_resampler->valid()) {
 Nan::ThrowError("Resampling failed (invalid resampler)");
@@ -182,7 +182,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_write_data_rated) {
 
 //TODO: Use a tmp preallocated buffer here!
 ssize_t target_samples = client->_resampler->estimated_output_size(samples);
-auto buffer = SampleBuffer::allocate((uint8_t) handle->channel_count, max((uint16_t) samples, (uint16_t) target_samples));
+auto buffer = SampleBuffer::allocate((uint8_t) handle->channel_count(), max((uint16_t) samples, (uint16_t) target_samples));
 auto source_buffer = js_buffer.Data();
 if(!interleaved) {
 auto src_buffer = (float*) js_buffer.Data();
@@ -220,7 +220,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_get_buffer_latency) {
 return;
 }
 
-info.GetReturnValue().Set((float) handle->min_buffered_samples() / (float) handle->sample_rate);
+info.GetReturnValue().Set((float) handle->min_buffered_samples() / (float) handle->sample_rate());
 }
 
 NAN_METHOD(AudioOutputStreamWrapper::_set_buffer_latency) {
@@ -237,7 +237,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_set_buffer_latency) {
 return;
 }
 
-handle->set_min_buffered_samples((size_t) ceil(handle->sample_rate * info[0]->NumberValue(Nan::GetCurrentContext()).FromMaybe(0)));
+handle->set_min_buffered_samples((size_t) ceil(handle->sample_rate() * info[0]->NumberValue(Nan::GetCurrentContext()).FromMaybe(0)));
 }
 
 NAN_METHOD(AudioOutputStreamWrapper::_get_buffer_max_latency) {
@@ -249,7 +249,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_get_buffer_max_latency) {
 return;
 }
 
-info.GetReturnValue().Set((float) handle->max_buffering() / (float) handle->sample_rate);
+info.GetReturnValue().Set((float) handle->max_buffering() / (float) handle->sample_rate());
 }
 
 NAN_METHOD(AudioOutputStreamWrapper::_set_buffer_max_latency) {
@@ -266,7 +266,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_set_buffer_max_latency) {
 return;
 }
 
-handle->set_max_buffered_samples((size_t) ceil(handle->sample_rate * info[0]->NumberValue(Nan::GetCurrentContext()).FromMaybe(0)));
+handle->set_max_buffered_samples((size_t) ceil(handle->sample_rate() * info[0]->NumberValue(Nan::GetCurrentContext()).FromMaybe(0)));
 }
 
 NAN_METHOD(AudioOutputStreamWrapper::_flush_buffer) {
 
@@ -104,8 +104,8 @@ namespace tc::audio::sounds {
 this->initialize_playback();
 
 auto max_samples = (size_t)
-std::max(this->output_source->sample_rate, this->file_handle->sample_rate()) * kBufferChunkTimespan * 8 *
-std::max(this->file_handle->channels(), this->output_source->channel_count);
+std::max(this->output_source->sample_rate(), this->file_handle->sample_rate()) * kBufferChunkTimespan * 8 *
+std::max(this->file_handle->channels(), this->output_source->channel_count());
 this->cache_buffer = ::malloc((size_t) (max_samples * sizeof(float)));
 if(!this->cache_buffer) {
 if(auto callback{this->settings_.callback}; callback)
@@ -138,7 +138,7 @@ namespace tc::audio::sounds {
 return;
 }
 
-if(!merge::merge_channels_interleaved(this->cache_buffer, this->output_source->channel_count, this->cache_buffer, this->file_handle->channels(), samples_to_read)) {
+if(!merge::merge_channels_interleaved(this->cache_buffer, this->output_source->channel_count(), this->cache_buffer, this->file_handle->channels(), samples_to_read)) {
 log_warn(category::audio, tr("failed to merge channels for replaying a sound"));
 return;
 }
@@ -149,7 +149,7 @@ namespace tc::audio::sounds {
 return;
 }
 
-audio::apply_gain(this->cache_buffer, this->output_source->channel_count, resampled_samples, this->settings_.volume);
+audio::apply_gain(this->cache_buffer, this->output_source->channel_count(), resampled_samples, this->settings_.volume);
 this->output_source->enqueue_samples(this->cache_buffer, resampled_samples);
 if(this->could_enqueue_next_buffer())
 audio::decode_event_loop->schedule(this->shared_from_this());
@@ -169,9 +169,9 @@ namespace tc::audio::sounds {
 
 const auto max_buffer = (size_t) ceil(global_audio_output->sample_rate() * kBufferChunkTimespan * 3);
 this->output_source = global_audio_output->create_source(max_buffer);
-this->output_source->overflow_strategy = audio::overflow_strategy::ignore;
+this->output_source->overflow_strategy = audio::OverflowStrategy::ignore;
 this->output_source->set_max_buffered_samples(max_buffer);
-this->output_source->set_min_buffered_samples((size_t) floor(this->output_source->sample_rate * 0.04));
+this->output_source->set_min_buffered_samples((size_t) floor(this->output_source->sample_rate() * 0.04));
 
 auto weak_this = this->weak_from_this();
 this->output_source->on_underflow = [weak_this](size_t sample_count){
@@ -203,12 +203,12 @@ namespace tc::audio::sounds {
 log_warn(category::audio, tr("Having an audio overflow while playing a sound."));
 };
 
-this->resampler = std::make_unique<AudioResampler>(this->file_handle->sample_rate(), this->output_source->sample_rate, this->output_source->channel_count);
+this->resampler = std::make_unique<AudioResampler>(this->file_handle->sample_rate(), this->output_source->sample_rate(), this->output_source->channel_count());
 }
 
 
 [[nodiscard]] inline size_t cache_buffer_sample_size() const {
-return (size_t) (this->output_source->sample_rate * kBufferChunkTimespan);
+return (size_t) (this->output_source->sample_rate() * kBufferChunkTimespan);
 }
 
 [[nodiscard]] inline bool could_enqueue_next_buffer() const {
 
@@ -1,11 +1,8 @@
 #include "VoiceClient.h"
 #include "../../audio/AudioOutput.h"
 #include "../../audio/codec/Converter.h"
 #include "../../audio/codec/OpusConverter.h"
 #include "../../audio/AudioMerger.h"
 #include "../../audio/js/AudioOutputStream.h"
 #include "../../audio/AudioEventLoop.h"
 #include "../../logger.h"
 #include "../../audio/AudioGain.h"
 
 using namespace std;
@@ -22,7 +19,8 @@ extern tc::audio::AudioOutput* global_audio_output;
 #else
 #define _field_(name, value) .name = value
 #endif
-const codec::condec_info codec::info[6] = {
+
+const codec::CodecInfo codec::info[6] = {
 {
 _field_(supported, false),
 _field_(name, "speex_narrowband"),
@@ -46,21 +44,23 @@ const codec::condec_info codec::info[6] = {
 {
 _field_(supported, true),
 _field_(name, "opus_voice"),
-_field_(new_converter, [](string& error) -> shared_ptr<Converter> {
-auto result = make_shared<OpusConverter>(1, 48000, 960);
-if(!result->initialize(error, OPUS_APPLICATION_VOIP))
-return nullptr;
-return dynamic_pointer_cast<Converter>(result);
+_field_(new_converter, [](string& error) -> std::shared_ptr<Converter> {
+auto result = std::make_shared<OpusConverter>(1, 48000, 960);
+if(!result->initialize(error, OPUS_APPLICATION_VOIP)) {
+return nullptr;
+}
+return std::dynamic_pointer_cast<Converter>(result);
 })
 },
 {
 _field_(supported, true),
 _field_(name, "opus_music"),
-_field_(new_converter, [](string& error) -> shared_ptr<Converter> {
-auto result = make_shared<OpusConverter>(2, 48000, 960);
-if(!result->initialize(error, OPUS_APPLICATION_AUDIO))
-return nullptr;
-return dynamic_pointer_cast<Converter>(result);
+_field_(new_converter, [](string& error) -> std::shared_ptr<Converter> {
+auto result = std::make_shared<OpusConverter>(2, 48000, 960);
+if(!result->initialize(error, OPUS_APPLICATION_AUDIO)) {
+return nullptr;
+}
+return std::dynamic_pointer_cast<Converter>(result);
 })
 }
 };
@@ -240,34 +240,19 @@ void VoiceClient::initialize() {
 
 assert(global_audio_output);
 client->output_source = global_audio_output->create_source();
-client->output_source->overflow_strategy = audio::overflow_strategy::ignore;
-client->output_source->set_max_buffered_samples((size_t) ceil(client->output_source->sample_rate * 0.5));
-client->output_source->set_min_buffered_samples((size_t) ceil(client->output_source->sample_rate * 0.04));
+client->output_source->overflow_strategy = audio::OverflowStrategy::ignore;
+client->output_source->set_max_buffered_samples((size_t) ceil(client->output_source->sample_rate() * 0.5));
+client->output_source->set_min_buffered_samples((size_t) ceil(client->output_source->sample_rate() * 0.04));
 
-client->output_source->on_underflow = [weak_this](size_t sample_count){ /* this callback will never be called when the client has been deallocated */
+client->output_source->on_underflow = [weak_this](size_t sample_count) {
 auto client = weak_this.lock();
 if(!client) {
 return false;
 }
 
-if(client->state_ == state::stopping) {
-client->set_state(state::stopped);
-} else if(client->state_ != state::stopped) {
-if(client->_last_received_packet + chrono::seconds{1} < chrono::system_clock::now()) {
-client->set_state(state::stopped);
-log_warn(category::audio, tr("Client {} has a audio buffer underflow for {} samples and not received any data for one second. Stopping replay."), client->client_id_, sample_count);
-} else {
-if(client->state_ != state::buffering) {
-log_warn(category::audio, tr("Client {} has a audio buffer underflow for {} samples. Buffer again."), client->client_id_, sample_count);
-client->set_state(state::buffering);
-}
-
-audio::decode_event_loop->schedule(static_pointer_cast<event::EventEntry>(client));
-}
-}
-
-return false;
+return client->handle_output_underflow(sample_count);
 };
 
 client->output_source->on_overflow = [weak_this](size_t count){
 auto client = weak_this.lock();
 if(!client) {
@@ -280,10 +265,54 @@ void VoiceClient::initialize() {
 }
 
 void VoiceClient::execute_tick() {
-if(this->state_ == state::buffering && this->_last_received_packet + chrono::milliseconds{250} < chrono::system_clock::now()) {
-this->set_state(state::stopped);
-log_debug(category::audio, tr("Audio stop packet for client {} seems to be lost. Stopping playback."), this->client_id_);
-}
+switch(this->state_) {
+case state::buffering:
+if(this->_last_received_packet + chrono::milliseconds{250} < chrono::system_clock::now()) {
+this->set_state(state::stopped);
+log_debug(category::audio, tr("Audio stop packet for client {} seems to be lost. Stopping playback."), this->client_id_);
+}
+break;
+
+case state::stopping: {
+auto output = this->output_source;
+if(!output) {
+this->set_state(state::stopped);
+break;
+}
+
+using BufferState = audio::AudioOutputSource::BufferState;
+switch(output->state()) {
+case BufferState::fadeout:
+/*
+* Even though we're in fadeout it's pretty reasonable to already set the state to stopped
+* especially since the tick method will only be called all 500ms.
+*/
+
+case BufferState::buffering:
+/* We have no more data to replay */
+
+this->set_state(state::stopped);
+break;
+
+case BufferState::playing:
+break;
+
+default:
+assert(false);
+break;
+}
+break;
+}
+
+case state::playing:
+case state::stopped:
+/* Nothing to do or to check. */
+break;
+
+default:
+assert(false);
+break;
+}
 }
 
 void VoiceClient::initialize_js_object() {
@@ -320,14 +349,16 @@ inline constexpr bool packet_id_less(uint16_t lower, uint16_t upper, uint16_t wi
 
 if(bounds - window <= lower) {
 uint16_t max_clip = lower + window;
-if(upper <= max_clip)
+if(upper <= max_clip) {
 return true;
-else if(upper > lower)
+} else if(upper > lower) {
 return true;
+}
 return false;
 } else {
-if(lower >= upper)
+if(lower >= upper) {
 return false;
+}
 
 return upper - lower <= window;
 }
@@ -340,16 +371,16 @@ inline constexpr uint16_t packet_id_diff(uint16_t lower, uint16_t upper) {
 }
 
 #define MAX_LOST_PACKETS (6)
-#define target_buffer_length 16384
-void VoiceClient::process_packet(uint16_t packet_id, const pipes::buffer_view& buffer, codec::value codec, bool is_head) {
+#define TEMP_BUFFER_LENGTH 16384
+void VoiceClient::process_packet(uint16_t packet_id, const pipes::buffer_view& buffer, codec::value buffer_codec, bool is_head) {
 #if 0
 if(rand() % 10 == 0) {
 log_info(category::audio, tr("Dropping audio packet id {}"), packet_id);
 return;
 }
 #endif
-if(codec < 0 || codec > this->codec.size()) {
-log_warn(category::voice_connection, tr("Received voice packet from client {} with unknown codec ({})"), this->client_id_, codec);
+if(buffer_codec < 0 || buffer_codec > this->codec.size()) {
+log_warn(category::voice_connection, tr("Received voice packet from client {} with unknown codec ({})"), this->client_id_, buffer_codec);
 return;
 }
 
@@ -358,19 +389,19 @@ void VoiceClient::process_packet(uint16_t packet_id, const pipes::buffer_view& b
 return;
 }
 
-auto& codec_data = this->codec[codec];
+auto& codec_data = this->codec[buffer_codec];
 if(codec_data.state == AudioCodec::State::UNINITIALIZED)
-this->initialize_code(codec);
+this->initialize_code(buffer_codec);
 
 if(codec_data.state != AudioCodec::State::INITIALIZED_SUCCESSFULLY) {
-log_warn(category::voice_connection, tr("Dropping audio packet because audio codec {} hasn't been initialized successfully (state: {})"), codec, (int) codec_data.state);
+log_warn(category::voice_connection, tr("Dropping audio packet because audio codec {} hasn't been initialized successfully (state: {})"), buffer_codec, (int) codec_data.state);
 return;
 }
 //TODO: short circuit handling if we've muted him (e.g. volume = 0)
 
 auto encoded_buffer = new EncodedBuffer{};
 encoded_buffer->packet_id = packet_id;
-encoded_buffer->codec = codec;
+encoded_buffer->codec = buffer_codec;
 encoded_buffer->receive_timestamp = chrono::system_clock::now();
 encoded_buffer->buffer = buffer.own_buffer();
 encoded_buffer->head = is_head;
@@ -404,10 +435,11 @@ void VoiceClient::process_packet(uint16_t packet_id, const pipes::buffer_view& b
 }
 
 encoded_buffer->next = head;
-if(prv_head)
+if(prv_head) {
 prv_head->next = encoded_buffer;
-else
+} else {
 codec_data.pending_buffers = encoded_buffer;
+}
 }
 codec_data.last_packet_timestamp = encoded_buffer->receive_timestamp;
 codec_data.process_pending = true;
@@ -424,10 +456,16 @@ void VoiceClient::cancel_replay() {
 output->clear();
 }
 
-this->set_state(state::stopped);
-audio::decode_event_loop->cancel(static_pointer_cast<event::EventEntry>(this->ref()));
+{
+auto execute_lock = this->execute_lock(true);
+this->drop_enqueued_buffers();
+}
+
+auto execute_lock = this->execute_lock(true);
+this->set_state(state::stopped);
+}
 
+void VoiceClient::drop_enqueued_buffers() {
 for(auto& codec_entry : this->codec) {
 auto head = codec_entry.pending_buffers;
 while(head) {
@@ -436,14 +474,15 @@ void VoiceClient::cancel_replay() {
 head = tmp;
 }
 
-codec_entry.pending_buffers = nullptr;
-codec_entry.force_replay = nullptr;
+codec_entry.pending_buffers = nullptr;
+codec_entry.force_replay = nullptr;
 }
 }
 
 void VoiceClient::event_execute(const std::chrono::system_clock::time_point &scheduled) {
 if(!this->output_source) {
-/* Audio hasn't been initialized yet. This also means there is no audio to be processed */
+/* Audio hasn't been initialized yet. This also means there is no audio to be processed. */
+this->drop_enqueued_buffers();
 return;
 }
 
@@ -454,12 +493,9 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 auto timeout = chrono::system_clock::now() + max_time;
 
 for(auto& audio_codec : this->codec) {
-if(!audio_codec.process_pending) {
-continue;
-}
-
-unique_lock lock{audio_codec.pending_lock};
-do {
+std::unique_lock lock{audio_codec.pending_lock};
+while(audio_codec.process_pending) {
 assert(lock.owns_lock());
 EncodedBuffer* replay_head{nullptr};
 uint16_t local_last_pid{audio_codec.last_packet_id};
 
@@ -482,8 +518,9 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 //Trying to replay the sequence
 head = audio_codec.pending_buffers;
 while(head && head->packet_id == audio_codec.last_packet_id + 1) {
-if(!replay_head)
+if(!replay_head) {
 replay_head = audio_codec.pending_buffers;
+}
 
 audio_codec.last_packet_id++;
 prv_head = head;
@@ -504,13 +541,16 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 
 while(skip_ptr[0]->next) {
 for(size_t i = 0; i < SKIP_SEQ_LENGTH; i++) {
-if(!skip_ptr[i]->next || skip_ptr[i]->packet_id + 1 != skip_ptr[i]->next->packet_id)
+if(!skip_ptr[i]->next || skip_ptr[i]->packet_id + 1 != skip_ptr[i]->next->packet_id) {
 break;
+}
 
 skip_ptr[i + 1] = skip_ptr[i]->next;
 }
-if(skip_ptr[SKIP_SEQ_LENGTH])
+
+if(skip_ptr[SKIP_SEQ_LENGTH]) {
 break;
+}
 
 skip_ptr[0] = skip_ptr[0]->next;
 }
@@ -522,7 +562,8 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 skip_ptr[SKIP_SEQ_LENGTH - 1]->next = nullptr;
 log_trace(category::voice_connection, tr("Skipping from {} to {} because of {} packets in a row"), audio_codec.last_packet_id, replay_head->packet_id, SKIP_SEQ_LENGTH);
 
-/* Do not set process_pending to false, because we're not done
+/*
+* Do not set process_pending to false, because we're not done
 * We're just replaying all loose packets which are not within a sequence until we reach a sequence
 * In the next loop the sequence will be played
 */
@@ -532,6 +573,7 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 if(packet_id_diff(audio_codec.last_packet_id, head->packet_id) >= 5) {
 break;
 }
 
 head = head->next;
 }
 
@@ -549,6 +591,7 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 }
 }
 }
 
 if(!replay_head) {
 audio_codec.process_pending = false;
 break;
@@ -556,8 +599,9 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 
 {
 auto head = replay_head;
-while(head->next)
+while(head->next) {
 head = head->next;
+}
 
 audio_codec.last_packet_id = head->packet_id;
 const auto ordered = !audio_codec.pending_buffers || packet_id_less(audio_codec.last_packet_id, audio_codec.pending_buffers->packet_id, 10);
@@ -570,10 +614,24 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 }
 }
 lock.unlock();
 
 while(replay_head) {
 if(replay_head->buffer.empty()) {
-this->set_state(state::stopping);
-log_debug(category::voice_connection, tr("Client {} send a stop signal. Flushing stream and stopping"), this->client_id_);
+switch(this->state_) {
+case state::playing:
+case state::buffering:
+this->set_state(state::stopping);
+log_debug(category::voice_connection, tr("Client {} send a stop signal. Flushing stream and stopping"), this->client_id_);
+break;
+
+case state::stopping:
+case state::stopped:
+break;
+
+default:
+assert(false);
+break;
+}
 } else {
 auto lost_packets = packet_id_diff(local_last_pid, replay_head->packet_id) - 1;
 if(lost_packets > 10) {
@@ -591,16 +649,34 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 }
 }
 
-const auto is_new_audio_stream = this->state_ != state::buffering && this->state_ != state::playing;
+bool is_new_audio_stream;
+switch(this->state_) {
+case state::stopped:
+case state::stopping:
+is_new_audio_stream = true;
+break;
+
+case state::buffering:
+case state::playing:
+is_new_audio_stream = false;
+break;
+
+default:
+assert(false);
+is_new_audio_stream = false;
+break;
+}
 
 if(replay_head->reset_decoder || is_new_audio_stream) {
 audio_codec.converter->reset_decoder();
 replay_head->reset_decoder = false;
 
 #if 1 /* Better approch */
 /* initialize with last packet */
-char target_buffer[target_buffer_length];
-if(target_buffer_length > audio_codec.converter->expected_decoded_length(replay_head->buffer.data_ptr(), replay_head->buffer.length())) {
-audio_codec.converter->decode(error, replay_head->buffer.data_ptr(), replay_head->buffer.length(), target_buffer, 1);
+static constexpr auto kTempBufferLength{16384};
+char temp_target_buffer[kTempBufferLength];
+if(kTempBufferLength >= audio_codec.converter->expected_decoded_length(replay_head->buffer.data_ptr(), replay_head->buffer.length())) {
+audio_codec.converter->decode(error, replay_head->buffer.data_ptr(), replay_head->buffer.length(), temp_target_buffer, true);
 } else {
 //TODO: May a small warning here?
 }
@@ -629,6 +705,7 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 
 //this->output_source->enqueue_silence((size_t) ceil(0.0075f * (float) this->output_source->sample_rate)); /* enqueue 7.5ms silence so we give the next packet a chance to be send */
 }
 
 this->output_source->enqueue_samples(decoded->sample_data, decoded->sample_size);
+this->set_state(state::playing);
 }
@@ -640,9 +717,13 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch
 replay_head = replay_head->next;
 delete last_head;
 }
-lock.lock(); //Check for more packets
-//TODO: Check for timeout?
-} while(audio_codec.process_pending);
+
+/*
+* Needs to be locked when entering the loop.
+* We'll check for more packets.
+*/
+lock.lock();
 };
 }
 
 if(reschedule) {
@@ -679,7 +760,7 @@ void VoiceClient::initialize_code(const codec::value &audio_codec) {
 return;
 }
 
-codec_data.resampler = make_shared<audio::AudioResampler>(codec_data.converter->sample_rate(), this->output_source->sample_rate, this->output_source->channel_count);
+codec_data.resampler = make_shared<audio::AudioResampler>(codec_data.converter->sample_rate(), this->output_source->sample_rate(), this->output_source->channel_count());
 if(!codec_data.resampler->valid()) {
 log_warn(category::voice_connection, tr("Failed to initialized codec {} for client {}. Failed to initialize resampler"), audio_codec, this->client_id_);
 return;
@@ -699,24 +780,25 @@ std::shared_ptr<audio::SampleBuffer> VoiceClient::decode_buffer(const codec::val
 }
 
 string error;
-char target_buffer[target_buffer_length];
-if(target_buffer_length < codec_data.converter->expected_decoded_length(buffer.data_ptr(), buffer.length())) {
-log_warn(category::voice_connection, tr("Failed to decode audio data. Target buffer is smaller then expected bytes ({} < {})"), target_buffer_length, codec_data.converter->expected_decoded_length(buffer.data_ptr(), buffer.length()));
+char target_buffer[TEMP_BUFFER_LENGTH];
+if(TEMP_BUFFER_LENGTH < codec_data.converter->expected_decoded_length(buffer.data_ptr(), buffer.length())) {
+log_warn(category::voice_connection, tr("Failed to decode audio data. Target buffer is smaller then expected bytes ({} < {})"), TEMP_BUFFER_LENGTH, codec_data.converter->expected_decoded_length(buffer.data_ptr(), buffer.length()));
 return nullptr;
 }
 
 auto samples = codec_data.converter->decode(error, buffer.data_ptr(), buffer.length(), target_buffer, fec);
 if(samples < 0) {
 log_warn(category::voice_connection, tr("Failed to decode audio data: {}"), error);
 return nullptr;
 }
 
-if(!audio::merge::merge_channels_interleaved(target_buffer, this->output_source->channel_count, target_buffer, codec_data.converter->channels(), samples)) {
+if(!audio::merge::merge_channels_interleaved(target_buffer, this->output_source->channel_count(), target_buffer, codec_data.converter->channels(), samples)) {
 log_warn(category::voice_connection, tr("Failed to merge channels to output stream channel count!"));
 return nullptr;
 }
 
-if(target_buffer_length < codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count * sizeof(float)) {
-log_warn(category::voice_connection, tr("Failed to resample audio data. Target buffer is smaller then expected bytes ({} < {})"), target_buffer_length, (codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count * 4));
+if(TEMP_BUFFER_LENGTH < codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count() * sizeof(float)) {
+log_warn(category::voice_connection, tr("Failed to resample audio data. Target buffer is smaller then expected bytes ({} < {})"), TEMP_BUFFER_LENGTH, (codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count() * 4));
 return nullptr;
 }
 
@@ -726,17 +808,65 @@ std::shared_ptr<audio::SampleBuffer> VoiceClient::decode_buffer(const codec::val
 return nullptr;
 }
 
-audio::apply_gain(target_buffer, this->output_source->channel_count, resampled_samples, this->volume_);
-auto audio_buffer = audio::SampleBuffer::allocate((uint8_t) this->output_source->channel_count, (uint16_t) resampled_samples);
+audio::apply_gain(target_buffer, this->output_source->channel_count(), resampled_samples, this->volume_);
+auto audio_buffer = audio::SampleBuffer::allocate((uint8_t) this->output_source->channel_count(), (uint16_t) resampled_samples);
 
 audio_buffer->sample_index = 0;
-memcpy(audio_buffer->sample_data, target_buffer, this->output_source->channel_count * resampled_samples * 4);
+memcpy(audio_buffer->sample_data, target_buffer, this->output_source->channel_count() * resampled_samples * 4);
 return audio_buffer;
 }
 
-void VoiceClient::event_execute_dropped(const std::chrono::system_clock::time_point &point) {
-if(audio_decode_event_dropped.exchange(true)) {
-//Is not really a warning, it happens all the time and isn't really an issue
-//log_warn(category::voice_connection, tr("Dropped auto enqueue event execution two or more times in a row for client {}"), this->_client_id);
-}
-}
+void VoiceClient::event_execute_dropped(const std::chrono::system_clock::time_point &point) { }
 
+/*
+* This method will be called within the audio event loop.
+*/
+bool VoiceClient::handle_output_underflow(size_t sample_count) {
+switch (this->state_) {
+case state::stopping:
+/*
+* No more data to play out.
+* We've successfully replayed our queue and are now in stopped state.
+*/
+this->set_state(state::stopped);
+break;
+
+case state::stopped:
+/*
+* We don't really care.
+* We have no audio to play back.
+*/
+break;
+
+case state::playing:
+/*
+* We're missing audio data.
+* Lets go back to buffering.
+*/
+this->set_state(state::buffering);
+break;
+
+case state::buffering:
+/*
+* Seems like we don't have any data for a bit longer already.
+* Lets check if we timeout this stream.
+*/
+if(this->_last_received_packet + std::chrono::seconds{1} < std::chrono::system_clock::now()) {
+this->set_state(state::stopped);
+log_warn(category::audio, tr("Clients {} audio stream timed out. We haven't received any audio packed within the last second. Stopping replay."), this->client_id_, sample_count);
+break;
+} else {
+/*
+* Lets wait until we have the next audio packet.
+*/
+break;
+}
+break;
+
+}
+
+/*
+* We haven't filled up the buffer.
+*/
+return false;
+}
@@ -9,6 +9,7 @@
 #include "../../audio/codec/Converter.h"
 #include "../../audio/AudioOutput.h"
+#include "../../EventLoop.h"
 #include "../../logger.h"
 
 namespace tc::connection {
 class ServerConnection;
@@ -31,16 +32,17 @@ namespace tc::connection {
 MAX = 5,
 };
 
-struct condec_info {
+struct CodecInfo {
 bool supported;
 std::string name;
 std::function<std::shared_ptr<audio::codec::Converter>(std::string&)> new_converter;
 };
 
-extern const condec_info info[6];
-inline const condec_info* get_info(value codec) {
-if(codec > value::MAX || codec < value::MIN)
+extern const CodecInfo info[6];
+inline const CodecInfo* get_info(value codec) {
+if(codec > value::MAX || codec < value::MIN) {
 return nullptr;
+}
 return &info[codec];
 }
 }
@@ -63,13 +65,20 @@ namespace tc::connection {
 stopping,
 stopped
 };
+
+constexpr static std::array names = {
+"buffering",
+"playing",
+"stopping",
+"stopped"
+};
 };
 VoiceClient(const std::shared_ptr<VoiceConnection>& /* connection */, uint16_t /* client id */);
 virtual ~VoiceClient();
 
 void initialize();
 
-inline uint16_t client_id() const { return this->client_id_; }
+[[nodiscard]] inline uint16_t client_id() const { return this->client_id_; }
 
 void initialize_js_object();
 void finalize_js_object();
@@ -119,9 +128,9 @@ namespace tc::connection {
 codec::value codec{};
 
 uint16_t last_packet_id{0xFFFF}; /* the first packet id is 0 so one packet before is 0xFFFF */
-std::chrono::system_clock::time_point last_packet_timestamp;
+std::chrono::system_clock::time_point last_packet_timestamp{};
 
-inline std::chrono::system_clock::time_point stream_timeout() const {
+[[nodiscard]] inline std::chrono::system_clock::time_point stream_timeout() const {
 return this->last_packet_timestamp + std::chrono::milliseconds{1000};
 }
 
@@ -142,31 +151,36 @@ namespace tc::connection {
 void initialize_code(const codec::value& /* codec */);
 
 /* might be null (if audio hasn't been initialized) */
-std::shared_ptr<audio::AudioOutputSource> output_source;
+std::shared_ptr<audio::AudioOutputSource> output_source{};
 
-std::weak_ptr<VoiceClient> ref_;
-v8::Persistent<v8::Object> js_handle_;
+std::weak_ptr<VoiceClient> ref_{};
+v8::Persistent<v8::Object> js_handle_{};
 
 uint16_t client_id_{0};
 float volume_{1.f};
 
-std::chrono::system_clock::time_point _last_received_packet;
-state::value state_ = state::stopped;
+std::chrono::system_clock::time_point _last_received_packet{};
+state::value state_{state::stopped};
 inline void set_state(state::value value) {
 if(value == this->state_) {
 return;
 }
 
+log_warn(category::audio, tr("Client {} state changed from {} to {}"), this->client_id_, state::names[this->state_], state::names[value]);
 this->state_ = value;
 if(this->on_state_changed) {
 this->on_state_changed();
 }
 }
 
-std::atomic_bool audio_decode_event_dropped{false};
+/* Call only within the event loop or when execute lock is locked */
+void drop_enqueued_buffers();
 
 void event_execute(const std::chrono::system_clock::time_point &point) override;
 void event_execute_dropped(const std::chrono::system_clock::time_point &point) override;
 
+bool handle_output_underflow(size_t sample_count);
+
 /* its recommend to call this in correct packet oder */
 std::shared_ptr<audio::SampleBuffer> decode_buffer(const codec::value& /* codec */,const pipes::buffer_view& /* buffer */, bool /* fec */);
 };
 