Fixed some audio bugs and improved the audio output player

parent b475b6db45, commit 1aab9e630a
@@ -1,6 +1,5 @@
|
||||
set(MODULE_NAME "teaclient_connection")
|
||||
|
||||
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer -static-libasan -lasan -lubsan")
|
||||
set(SOURCE_FILES
|
||||
src/logger.cpp
|
||||
src/EventLoop.cpp
|
||||
@ -15,10 +14,12 @@ set(SOURCE_FILES
|
||||
src/audio/AudioSamples.cpp
|
||||
src/audio/AudioMerger.cpp
|
||||
src/audio/AudioOutput.cpp
|
||||
src/audio/AudioOutputSource.cpp
|
||||
src/audio/AudioInput.cpp
|
||||
src/audio/AudioResampler.cpp
|
||||
src/audio/AudioReframer.cpp
|
||||
src/audio/AudioEventLoop.cpp
|
||||
src/audio/AudioInterleaved.cpp
|
||||
|
||||
src/audio/filter/FilterVad.cpp
|
||||
src/audio/filter/FilterThreshold.cpp
|
||||
@ -27,6 +28,8 @@ set(SOURCE_FILES
|
||||
src/audio/codec/Converter.cpp
|
||||
src/audio/codec/OpusConverter.cpp
|
||||
|
||||
src/audio/processing/AudioProcessor.cpp
|
||||
|
||||
src/audio/driver/AudioDriver.cpp
|
||||
src/audio/sounds/SoundPlayer.cpp
|
||||
src/audio/file/wav.cpp
|
||||
@ -114,22 +117,21 @@ include_directories(${Ed25519_INCLUDE_DIR})
|
||||
find_package(rnnoise REQUIRED)
|
||||
|
||||
if (WIN32)
|
||||
add_compile_options(/NODEFAULTLIB:ThreadPoolStatic)
|
||||
add_definitions(-DWINDOWS) #Required by ThreadPool
|
||||
add_definitions(-D_CRT_SECURE_NO_WARNINGS) # Let windows allow strerror
|
||||
# add_compile_options(/NODEFAULTLIB:ThreadPoolStatic)
|
||||
# add_definitions(-DWINDOWS) #Required by ThreadPool
|
||||
# add_definitions(-D_CRT_SECURE_NO_WARNINGS) # Let windows allow strerror
|
||||
add_definitions(-D_SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING) # For the FMT library
|
||||
endif ()
|
||||
|
||||
find_package(Soxr REQUIRED)
|
||||
include_directories(${soxr_INCLUDE_DIR})
|
||||
include_directories(${Soxr_INCLUDE_DIR})
|
||||
|
||||
find_package(fvad REQUIRED)
|
||||
include_directories(${fvad_INCLUDE_DIR})
|
||||
|
||||
find_package(Opus REQUIRED)
|
||||
include_directories(${opus_INCLUDE_DIR})
|
||||
|
||||
find_package(spdlog REQUIRED)
|
||||
find_package(WebRTCAudioProcessing REQUIRED)
|
||||
|
||||
set(REQUIRED_LIBRARIES
|
||||
${TeaSpeak_SharedLib_LIBRARIES_STATIC}
|
||||
@ -138,11 +140,12 @@ set(REQUIRED_LIBRARIES
|
||||
${TomMath_LIBRARIES_STATIC}
|
||||
|
||||
libevent::core
|
||||
webrtc::audio::processing
|
||||
|
||||
DataPipes::core::static
|
||||
${soxr_LIBRARIES_STATIC}
|
||||
${Soxr_LIBRARIES_STATIC}
|
||||
${fvad_LIBRARIES_STATIC}
|
||||
${opus_LIBRARIES_STATIC}
|
||||
opus::static
|
||||
|
||||
${Ed25519_LIBRARIES_STATIC}
|
||||
rnnoise
|
||||
|
@ -7,13 +7,14 @@
|
||||
#include "../logger.h"
|
||||
|
||||
bool tc::audio::apply_gain(void *vp_buffer, size_t channel_count, size_t sample_count, float gain) {
|
||||
if(gain == 1.f)
|
||||
if(gain == 1.f) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool audio_clipped{false};
|
||||
auto buffer = (float*) vp_buffer;
|
||||
auto elements_left = channel_count * sample_count;
|
||||
while (elements_left--) {
|
||||
auto buffer_end = buffer + channel_count * sample_count;
|
||||
while (buffer != buffer_end) {
|
||||
auto& value = *buffer++;
|
||||
value *= gain;
|
||||
|
||||
|
@ -17,7 +17,7 @@ AudioConsumer::AudioConsumer(tc::audio::AudioInput *handle, size_t channel_count
|
||||
sample_rate(sample_rate) ,
|
||||
frame_size(frame_size) {
|
||||
if(this->frame_size > 0) {
|
||||
this->reframer = std::make_unique<Reframer>(channel_count, frame_size);
|
||||
this->reframer = std::make_unique<InputReframer>(channel_count, frame_size);
|
||||
this->reframer->on_frame = [&](const void* buffer) { this->handle_framed_data(buffer, this->frame_size); };
|
||||
}
|
||||
}
|
||||
@ -150,13 +150,13 @@ void AudioInput::consume(const void *input, size_t frameCount, size_t channels)
|
||||
const auto expected_byte_size = expected_size * this->_channel_count * sizeof(float);
|
||||
this->ensure_resample_buffer_capacity(expected_byte_size);
|
||||
|
||||
auto result = this->_resampler->process(this->resample_buffer, input, frameCount);
|
||||
if(result < 0) {
|
||||
log_error(category::audio, tr("Failed to resample input audio: {}"), result);
|
||||
size_t sample_count{expected_size};
|
||||
if(!this->_resampler->process(this->resample_buffer, input, frameCount, sample_count)) {
|
||||
log_error(category::audio, tr("Failed to resample input audio."));
|
||||
return;
|
||||
}
|
||||
|
||||
frameCount = (size_t) result;
|
||||
frameCount = sample_count;
|
||||
input = this->resample_buffer;
|
||||
|
||||
audio::apply_gain(this->resample_buffer, this->_channel_count, frameCount, this->_volume);
|
||||
|
@ -11,7 +11,7 @@
|
||||
class AudioInputSource;
|
||||
namespace tc::audio {
|
||||
class AudioInput;
|
||||
class Reframer;
|
||||
class InputReframer;
|
||||
class AudioResampler;
|
||||
|
||||
class AudioConsumer {
|
||||
@ -29,7 +29,7 @@ namespace tc::audio {
|
||||
private:
|
||||
AudioConsumer(AudioInput* handle, size_t channel_count, size_t sample_rate, size_t frame_size);
|
||||
|
||||
std::unique_ptr<Reframer> reframer;
|
||||
std::unique_ptr<InputReframer> reframer;
|
||||
|
||||
void process_data(const void* /* buffer */, size_t /* samples */);
|
||||
void handle_framed_data(const void* /* buffer */, size_t /* samples */);
|
||||
|
native/serverconnection/src/audio/AudioInterleaved.cpp (new file, 41 lines)
@@ -0,0 +1,41 @@
//
// Created by WolverinDEV on 27/03/2021.
//

#include <cassert>
#include "AudioInterleaved.h"

using namespace tc;

constexpr static auto kMaxChannelCount{32};
void audio::deinterleave(float *target, const float *source, size_t channel_count, size_t sample_count) {
    assert(channel_count <= kMaxChannelCount);
    assert(source != target);

    float* target_ptr[kMaxChannelCount];
    for(size_t channel{0}; channel < channel_count; channel++) {
        target_ptr[channel] = target + (channel * sample_count);
    }

    for(size_t sample{0}; sample < sample_count; sample++) {
        for(size_t channel{0}; channel < channel_count; channel++) {
            *target_ptr[channel]++ = *source++;
        }
    }
}

void audio::interleave(float *target, const float *source, size_t channel_count, size_t sample_count) {
    assert(channel_count <= kMaxChannelCount);
    assert(source != target);

    const float* source_ptr[kMaxChannelCount];
    for(size_t channel{0}; channel < channel_count; channel++) {
        source_ptr[channel] = source + (channel * sample_count);
    }

    for(size_t sample{0}; sample < sample_count; sample++) {
        for(size_t channel{0}; channel < channel_count; channel++) {
            *target++ = *source_ptr[channel]++;
        }
    }
}
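The two functions above convert between interleaved sample order (L0 R0 L1 R1 ...) and planar order (all of channel 0, then all of channel 1, ...). A minimal round-trip sketch, not part of the commit; the buffer values and the main() wrapper are illustrative, and the declarations are repeated here instead of including AudioInterleaved.h:

#include <array>
#include <cstddef>

// Declarations introduced by this commit (normally pulled in via AudioInterleaved.h).
namespace tc::audio {
    extern void deinterleave(float*, const float*, std::size_t, std::size_t);
    extern void interleave(float*, const float*, std::size_t, std::size_t);
}

int main() {
    // Two channels, three samples per channel, interleaved as L0 R0 L1 R1 L2 R2.
    std::array<float, 6> interleaved{0.f, 10.f, 1.f, 11.f, 2.f, 12.f};
    std::array<float, 6> planar{};
    std::array<float, 6> round_trip{};

    // planar becomes L0 L1 L2 R0 R1 R2: one contiguous block per channel.
    tc::audio::deinterleave(planar.data(), interleaved.data(), 2, 3);

    // Interleaving the planar data again restores the original ordering.
    tc::audio::interleave(round_trip.data(), planar.data(), 2, 3);
    return round_trip == interleaved ? 0 : 1;
}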
native/serverconnection/src/audio/AudioInterleaved.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#pragma once

namespace tc::audio {
    extern void deinterleave(
            float* /* dest */,
            const float * /* source */,
            size_t /* channel count */,
            size_t /* sample count */
    );

    extern void interleave(
            float* /* dest */,
            const float * /* source */,
            size_t /* channel count */,
            size_t /* sample count */
    );
}
@@ -8,23 +8,24 @@ using namespace tc::audio;
|
||||
|
||||
/* technique based on http://www.vttoth.com/CMS/index.php/technical-notes/68 */
|
||||
inline constexpr float merge_ab(float a, float b) {
|
||||
/*
|
||||
* Form: A,B := [0;n]
|
||||
* Z = 2(A + B) - (2/n) * A * B - n
|
||||
*
|
||||
* For a range from 0 to 2: Z = 2(A + B) - AB - 2
|
||||
*/
|
||||
/*
|
||||
* Form: A,B := [0;n]
|
||||
* Z = 2(A + B) - (2/n) * A * B - n
|
||||
*
|
||||
* For a range from 0 to 2: Z = 2(A + B) - AB - 2
|
||||
*/
|
||||
|
||||
a += 1;
|
||||
b += 1;
|
||||
a += 1;
|
||||
b += 1;
|
||||
|
||||
auto result = (2 * (a + b) - a * b - 2) - 1;
|
||||
if(result > 1) {
|
||||
result = 1;
|
||||
} else if(result < -1) {
|
||||
result = -1;
|
||||
float result{};
|
||||
if(a < 1 && b < 1) {
|
||||
result = a * b;
|
||||
} else {
|
||||
result = 2 * (a + b) - a * b - 2;
|
||||
}
|
||||
|
||||
result -= 1;
|
||||
return result;
|
||||
}
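A worked check of the new branching, against the first static_assert in the hunk below (this derivation is editorial, not part of the commit). With both samples shifted into [0; 2]:

    a' = a + 1, b' = b + 1
    Z  = a' * b'                        if a' < 1 and b' < 1
    Z  = 2(a' + b') - a' * b' - 2       otherwise
    merge_ab(a, b) = Z - 1

For a = 1, b = 0 this gives a' = 2, b' = 1, hence Z = 2 * 3 - 2 * 1 - 2 = 2 and a merged sample of 2 - 1 = 1, which is exactly static_assert(merge_ab(1, 0) == 1). Compared to the old version, the quadrant where both shifted samples lie below 1 now uses the product a' * b' (as in the linked technique) and the explicit clamp to [-1, 1] is gone.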
@@ -32,13 +33,14 @@ static_assert(merge_ab(1, 0) == 1);
|
||||
static_assert(merge_ab(0, 1) == 1);
|
||||
static_assert(merge_ab(1, 1) == 1);
|
||||
|
||||
bool merge::merge_sources(void *_dest, void *_src_a, void *_src_b, size_t channels, size_t samples) {
|
||||
auto dest = (float*) _dest;
|
||||
auto src_a = (float*) _src_a;
|
||||
auto src_b = (float*) _src_b;
|
||||
bool merge::merge_sources(void *dest_, void *src_a_, void *src_b_, size_t channels, size_t samples) {
|
||||
auto dest = (float*) dest_;
|
||||
auto src_a = (float*) src_a_;
|
||||
auto src_b = (float*) src_b_;
|
||||
|
||||
for(size_t index = 0; index < channels * samples; index++)
|
||||
dest[index] = merge_ab(src_a[index], src_b[index]);
|
||||
for(size_t index = 0; index < channels * samples; index++) {
|
||||
dest[index] = merge_ab(src_a[index], src_b[index]);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -1,8 +1,10 @@
|
||||
#include "./AudioOutput.h"
|
||||
#include "./AudioMerger.h"
|
||||
#include "./AudioResampler.h"
|
||||
#include "./AudioInterleaved.h"
|
||||
#include "./AudioGain.h"
|
||||
#include "./processing/AudioProcessor.h"
|
||||
#include "../logger.h"
|
||||
#include "AudioGain.h"
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
@ -11,311 +13,10 @@ using namespace std;
|
||||
using namespace tc;
|
||||
using namespace tc::audio;
|
||||
|
||||
void AudioOutputSource::clear() {
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
this->buffer.clear();
|
||||
this->buffer_state = BufferState::buffering;
|
||||
this->fadeout_samples_left = 0;
|
||||
AudioOutput::AudioOutput(size_t channels, size_t rate) : channel_count_{channels}, sample_rate_{rate} {
|
||||
assert(this->sample_rate_ % kChunkTimeMs == 0);
|
||||
}
|
||||
|
||||
void AudioOutputSource::apply_fadeout() {
|
||||
const auto samples_available = this->currently_buffered_samples();
|
||||
auto fade_samples = std::min(samples_available, this->fadeout_frame_samples_);
|
||||
if(!fade_samples) {
|
||||
this->fadeout_samples_left = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
const auto sample_byte_size = this->channel_count_ * sizeof(float) * fade_samples;
|
||||
assert(this->buffer.fill_count() >= sample_byte_size);
|
||||
auto write_ptr = (float*) ((char*) this->buffer.read_ptr() + (this->buffer.fill_count() - sample_byte_size));
|
||||
|
||||
for(size_t index{0}; index < fade_samples; index++) {
|
||||
const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
|
||||
const auto volume = std::min(log10f(offset) / -2.71828182845904f, 1.f);
|
||||
|
||||
for(int channel{0}; channel < this->channel_count_; channel++) {
|
||||
*write_ptr++ *= volume;
|
||||
}
|
||||
}
|
||||
|
||||
this->fadeout_samples_left = fade_samples;
|
||||
}
|
||||
|
||||
void AudioOutputSource::apply_fadein() {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
const auto samples_available = this->currently_buffered_samples();
|
||||
auto fade_samples = std::min(samples_available - this->fadeout_samples_left, this->fadein_frame_samples_);
|
||||
if(!fade_samples) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: We're using the read_ptr() here in order to correctly apply the effect.
|
||||
* This isn't really best practice but works.
|
||||
*/
|
||||
auto write_ptr = (float*) this->buffer.read_ptr() + this->fadeout_samples_left * this->channel_count_;
|
||||
for(size_t index{0}; index < fade_samples; index++) {
|
||||
const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
|
||||
const auto volume = std::min(log10f(1 - offset) / -2.71828182845904f, 1.f);
|
||||
|
||||
for(int channel{0}; channel < this->channel_count_; channel++) {
|
||||
*write_ptr++ *= volume;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool AudioOutputSource::pop_samples(void *target_buffer, size_t target_sample_count) {
|
||||
std::unique_lock buffer_lock{this->buffer_mutex};
|
||||
auto result = this->pop_samples_(target_buffer, target_sample_count);
|
||||
buffer_lock.unlock();
|
||||
|
||||
if(auto callback{this->on_read}; callback) {
|
||||
callback();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_count) {
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout: {
|
||||
/* Write as much we can */
|
||||
const auto write_samples = std::min(this->fadeout_samples_left, target_sample_count);
|
||||
const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
|
||||
/* Fill the rest with silence */
|
||||
const auto empty_samples = target_sample_count - write_samples;
|
||||
const auto empty_byte_size = empty_samples * this->channel_count_ * sizeof(float);
|
||||
memset((char*) target_buffer + write_byte_size, 0, empty_byte_size);
|
||||
|
||||
this->fadeout_samples_left -= write_samples;
|
||||
if(!this->fadeout_samples_left) {
|
||||
log_trace(category::audio, tr("{} Successfully replayed fadeout sequence."), (void*) this);
|
||||
this->buffer_state = BufferState::buffering;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::playing: {
|
||||
const auto buffered_samples = this->currently_buffered_samples();
|
||||
if(buffered_samples < target_sample_count + this->fadeout_frame_samples_) {
|
||||
const auto missing_samples = target_sample_count + this->fadeout_frame_samples_ - buffered_samples;
|
||||
if(auto callback{this->on_underflow}; callback) {
|
||||
if(callback(missing_samples)) {
|
||||
/* We've been filled up again. Trying again to fill the output buffer. */
|
||||
return this->pop_samples(target_buffer, target_sample_count);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* When consuming target_sample_count amount samples of our buffer we could not
|
||||
* apply the fadeout effect any more. Instead we're applying it now and returning to buffering state.
|
||||
*/
|
||||
this->apply_fadeout();
|
||||
|
||||
/* Write the rest of unmodified buffer */
|
||||
const auto write_samples = buffered_samples - this->fadeout_samples_left;
|
||||
assert(write_samples <= target_sample_count);
|
||||
const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
|
||||
log_trace(category::audio, tr("{} Starting stream fadeout. Requested samples {}, Buffered samples: {}, Fadeout frame samples: {}, Returned normal samples: {}"),
|
||||
(void*) this, target_sample_count, buffered_samples, this->fadeout_frame_samples_, write_samples
|
||||
);
|
||||
|
||||
this->buffer_state = BufferState::fadeout;
|
||||
if(write_samples < target_sample_count) {
|
||||
/* Fill the rest of the buffer with the fadeout content */
|
||||
this->pop_samples((char*) target_buffer + write_byte_size, target_sample_count - write_samples);
|
||||
}
|
||||
} else {
|
||||
/* We can just normally copy the buffer */
|
||||
const auto write_byte_size = target_sample_count * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::buffering:
|
||||
/* Nothing to replay */
|
||||
return false;
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
ssize_t AudioOutputSource::enqueue_samples(const void *source_buffer, size_t sample_count) {
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
return this->enqueue_samples_(source_buffer, sample_count);
|
||||
}
|
||||
|
||||
ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sample_count) {
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout:
|
||||
case BufferState::buffering: {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
assert(this->min_buffered_samples_ >= this->currently_buffered_samples() - this->fadeout_samples_left);
|
||||
const auto missing_samples = this->min_buffered_samples_ - (this->currently_buffered_samples() - this->fadeout_samples_left);
|
||||
const auto write_sample_count = std::min(missing_samples, sample_count);
|
||||
const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
|
||||
|
||||
assert(write_sample_count <= this->max_supported_buffering());
|
||||
memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
|
||||
this->buffer.advance_write_ptr(write_byte_size);
|
||||
|
||||
if(sample_count < missing_samples) {
|
||||
/* we still need to buffer */
|
||||
return sample_count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Even though we still have fadeout samples left we don't declare them as such since we've already fulled
|
||||
* our future buffer.
|
||||
*/
|
||||
this->fadeout_samples_left = 0;
|
||||
|
||||
/* buffering finished */
|
||||
log_trace(category::audio, tr("{} Finished buffering {} samples. Fading them in."), (void*) this, this->min_buffered_samples_);
|
||||
this->apply_fadein();
|
||||
this->buffer_state = BufferState::playing;
|
||||
if(sample_count > missing_samples) {
|
||||
/* we've more data to write */
|
||||
return this->enqueue_samples((const char*) source_buffer + write_byte_size, sample_count - missing_samples) + write_sample_count;
|
||||
} else {
|
||||
return write_sample_count;
|
||||
}
|
||||
}
|
||||
|
||||
case BufferState::playing: {
|
||||
const auto buffered_samples = this->currently_buffered_samples();
|
||||
|
||||
const auto write_sample_count = std::min(this->max_supported_buffering() - buffered_samples, sample_count);
|
||||
const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
|
||||
|
||||
memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
|
||||
this->buffer.advance_write_ptr(write_byte_size);
|
||||
|
||||
if(write_sample_count < sample_count) {
|
||||
if(auto callback{this->on_overflow}; callback) {
|
||||
callback(sample_count - write_sample_count);
|
||||
}
|
||||
|
||||
switch (this->overflow_strategy) {
|
||||
case OverflowStrategy::discard_input:
|
||||
return -2;
|
||||
|
||||
case OverflowStrategy::discard_buffer_all:
|
||||
this->buffer.clear();
|
||||
break;
|
||||
|
||||
case OverflowStrategy::discard_buffer_half:
|
||||
/* FIXME: This implementation is wrong! */
|
||||
this->buffer.advance_read_ptr(this->buffer.fill_count() / 2);
|
||||
break;
|
||||
|
||||
case OverflowStrategy::ignore:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return write_sample_count;
|
||||
}
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr static auto kMaxStackBuffer{1024 * 8 * sizeof(float)};
|
||||
ssize_t AudioOutputSource::enqueue_samples_no_interleave(const void *source_buffer, size_t samples) {
|
||||
if(this->channel_count_ == 1) {
|
||||
return this->enqueue_samples(source_buffer, samples);
|
||||
} else if(this->channel_count_ == 2) {
|
||||
const auto buffer_byte_size = samples * this->channel_count_ * sizeof(float);
|
||||
if(buffer_byte_size > kMaxStackBuffer) {
|
||||
/* We can't convert to interleave */
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint8_t stack_buffer[kMaxStackBuffer];
|
||||
{
|
||||
auto src_buffer = (const float*) source_buffer;
|
||||
auto target_buffer = (float*) stack_buffer;
|
||||
|
||||
auto samples_to_write = samples;
|
||||
while (samples_to_write-- > 0) {
|
||||
*target_buffer = *src_buffer;
|
||||
*(target_buffer + 1) = *(src_buffer + samples);
|
||||
|
||||
target_buffer += 2;
|
||||
src_buffer++;
|
||||
}
|
||||
}
|
||||
|
||||
return this->enqueue_samples(stack_buffer, samples);
|
||||
} else {
|
||||
/* TODO: Generalize to interleave algo */
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool AudioOutputSource::set_max_buffered_samples(size_t samples) {
|
||||
samples = std::max(samples, (size_t) this->fadein_frame_samples_);
|
||||
if(samples > this->max_supported_buffering()) {
|
||||
samples = this->max_supported_buffering();
|
||||
}
|
||||
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
if(samples < this->min_buffered_samples_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
this->max_buffered_samples_ = samples;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AudioOutputSource::set_min_buffered_samples(size_t samples) {
|
||||
samples = std::max(samples, (size_t) this->fadein_frame_samples_);
|
||||
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
if(samples > this->max_buffered_samples_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
this->min_buffered_samples_ = samples;
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout:
|
||||
case BufferState::buffering: {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
const auto buffered_samples = this->currently_buffered_samples() - this->fadeout_samples_left;
|
||||
if(buffered_samples > this->min_buffered_samples_) {
|
||||
log_trace(category::audio, tr("{} Finished buffering {} samples (due to min buffered sample reduce). Fading them in."), (void*) this, this->min_buffered_samples_);
|
||||
this->apply_fadein();
|
||||
this->buffer_state = BufferState::playing;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::playing:
|
||||
return true;
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
AudioOutput::AudioOutput(size_t channels, size_t rate) : channel_count_(channels), sample_rate_(rate) { }
|
||||
|
||||
AudioOutput::~AudioOutput() {
|
||||
this->close_device();
|
||||
this->cleanup_buffers();
|
||||
@ -330,185 +31,217 @@ std::shared_ptr<AudioOutputSource> AudioOutput::create_source(ssize_t buf) {
|
||||
return result;
|
||||
}
|
||||
|
||||
void AudioOutput::cleanup_buffers() {
|
||||
free(this->source_buffer);
|
||||
free(this->source_merge_buffer);
|
||||
free(this->resample_overhead_buffer);
|
||||
|
||||
this->source_merge_buffer = nullptr;
|
||||
this->source_buffer = nullptr;
|
||||
this->resample_overhead_buffer = nullptr;
|
||||
|
||||
this->source_merge_buffer_length = 0;
|
||||
this->source_buffer_length = 0;
|
||||
this->resample_overhead_buffer_length = 0;
|
||||
this->resample_overhead_samples = 0;
|
||||
void AudioOutput::register_audio_processor(const std::shared_ptr<AudioProcessor> &processor) {
|
||||
std::lock_guard processor_lock{this->processors_mutex};
|
||||
this->audio_processors_.push_back(processor);
|
||||
}
|
||||
|
||||
void AudioOutput::fill_buffer(void *output, size_t out_frame_count, size_t out_channels) {
|
||||
if(out_channels != this->channel_count_) {
|
||||
log_critical(category::audio, tr("Channel count miss match (output)! Expected: {} Received: {}. Fixme!"), this->channel_count_, out_channels);
|
||||
return;
|
||||
bool AudioOutput::unregister_audio_processor(const std::shared_ptr<AudioProcessor> &processor) {
|
||||
std::lock_guard processor_lock{this->processors_mutex};
|
||||
auto index = std::find(this->audio_processors_.begin(), this->audio_processors_.end(), processor);
|
||||
if(index == this->audio_processors_.end()) {
|
||||
return false;
|
||||
}
|
||||
auto local_frame_count = this->resampler_ ? this->resampler_->input_size(out_frame_count) : out_frame_count;
|
||||
void* const original_output{output};
|
||||
|
||||
if(this->resample_overhead_samples > 0) {
|
||||
const auto samples_to_write = this->resample_overhead_samples > out_frame_count ? out_frame_count : this->resample_overhead_samples;
|
||||
const auto byte_length = samples_to_write * sizeof(float) * out_channels;
|
||||
this->audio_processors_.erase(index);
|
||||
return true;
|
||||
}
|
||||
|
||||
if(output) {
|
||||
memcpy(output, this->resample_overhead_buffer, byte_length);
|
||||
void AudioOutput::cleanup_buffers() {
|
||||
free(this->chunk_buffer);
|
||||
free(this->source_merge_buffer);
|
||||
|
||||
this->source_merge_buffer = nullptr;
|
||||
this->source_merge_buffer_length = 0;
|
||||
|
||||
this->chunk_buffer = nullptr;
|
||||
this->chunk_buffer_length = 0;
|
||||
}
|
||||
|
||||
void AudioOutput::ensure_chunk_buffer_space(size_t output_samples) {
|
||||
const auto own_chunk_size = (AudioOutput::kChunkTimeMs * this->sample_rate_ * this->channel_count_) / 1000;
|
||||
const auto min_chunk_byte_size = std::max(own_chunk_size, output_samples * this->current_output_channels) * sizeof(float);
|
||||
|
||||
if(this->chunk_buffer_length < min_chunk_byte_size) {
|
||||
if(this->chunk_buffer) {
|
||||
::free(this->chunk_buffer);
|
||||
}
|
||||
|
||||
if(samples_to_write == out_frame_count) {
|
||||
this->resample_overhead_samples -= samples_to_write;
|
||||
memcpy(this->resample_overhead_buffer, (char*) this->resample_overhead_buffer + byte_length, this->resample_overhead_samples * this->channel_count_ * sizeof(float));
|
||||
this->chunk_buffer = malloc(min_chunk_byte_size);
|
||||
this->chunk_buffer_length = min_chunk_byte_size;
|
||||
}
|
||||
}
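For scale, a worked example of the sizing computed above, in the same static_assert style the commit uses elsewhere. The 48 kHz stereo configuration is an assumption for illustration; only kChunkTimeMs = 10 comes from AudioOutput.h:

#include <cstddef>

// Example values: 48 kHz stereo is assumed, kChunkTimeMs = 10 is taken from AudioOutput.h.
constexpr std::size_t kChunkTimeMs = 10;
constexpr std::size_t sample_rate = 48000;
constexpr std::size_t channel_count = 2;

// Mirrors own_chunk_size in ensure_chunk_buffer_space().
constexpr std::size_t own_chunk_size = (kChunkTimeMs * sample_rate * channel_count) / 1000;
static_assert(own_chunk_size == 960, "10 ms of 48 kHz stereo audio is 960 float values");
static_assert(own_chunk_size * sizeof(float) == 3840, "which needs a 3840 byte chunk buffer");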
void AudioOutput::fill_buffer(void *output, size_t request_sample_count, size_t out_channels) {
|
||||
assert(output);
|
||||
|
||||
if(out_channels != this->current_output_channels) {
|
||||
log_info(category::audio, tr("Output channel count changed from {} to {}"), this->current_output_channels, out_channels);
|
||||
this->current_output_channels = out_channels;
|
||||
|
||||
/*
|
||||
* Mark buffer as fully replayed and refill it with new data which fits the new channel count.
|
||||
*/
|
||||
this->chunk_buffer_sample_length = 0;
|
||||
this->chunk_buffer_sample_offset = 0;
|
||||
}
|
||||
|
||||
auto remaining_samples{request_sample_count};
|
||||
auto remaining_buffer{output};
|
||||
|
||||
if(this->chunk_buffer_sample_offset < this->chunk_buffer_sample_length) {
|
||||
/*
|
||||
* We can (partially) fill the output buffer with our current chunk.
|
||||
*/
|
||||
|
||||
const auto sample_count = std::min(this->chunk_buffer_sample_length - this->chunk_buffer_sample_offset, request_sample_count);
|
||||
memcpy(output, (float*) this->chunk_buffer + this->chunk_buffer_sample_offset * this->current_output_channels, sample_count * this->current_output_channels * sizeof(float));
|
||||
this->chunk_buffer_sample_offset += sample_count;
|
||||
|
||||
if(sample_count == request_sample_count) {
|
||||
/* We've successfully filled the whole output buffer. */
|
||||
return;
|
||||
} else {
|
||||
this->resample_overhead_samples = 0;
|
||||
output = (char*) output + byte_length;
|
||||
out_frame_count -= samples_to_write;
|
||||
local_frame_count -= this->resampler_ ? this->resampler_->input_size(samples_to_write) : samples_to_write;
|
||||
}
|
||||
|
||||
remaining_samples = request_sample_count - sample_count;
|
||||
remaining_buffer = (float*) output + sample_count * this->current_output_channels;
|
||||
}
|
||||
|
||||
this->fill_chunk_buffer();
|
||||
this->chunk_buffer_sample_offset = 0;
|
||||
|
||||
return this->fill_buffer(remaining_buffer, remaining_samples, out_channels);
|
||||
}
|
||||
|
||||
constexpr static auto kTempChunkBufferSize{64 * 1024};
|
||||
constexpr static auto kMaxChannelCount{32};
|
||||
void AudioOutput::fill_chunk_buffer() {
|
||||
|
||||
const auto chunk_local_sample_count = this->chunk_local_sample_count();
|
||||
assert(chunk_local_sample_count > 0);
|
||||
assert(this->current_output_channels <= kMaxChannelCount);
|
||||
|
||||
std::vector<std::shared_ptr<AudioOutputSource>> sources{};
|
||||
sources.reserve(8);
|
||||
|
||||
std::unique_lock sources_lock{this->sources_mutex};
|
||||
{
|
||||
sources.reserve(this->sources_.size());
|
||||
this->sources_.erase(std::remove_if(this->sources_.begin(), this->sources_.end(), [&](const std::weak_ptr<AudioOutputSource>& weak_source) {
|
||||
auto source = weak_source.lock();
|
||||
if(!source) {
|
||||
return true;
|
||||
}
|
||||
|
||||
sources.push_back(std::move(source));
|
||||
return false;
|
||||
}), this->sources_.end());
|
||||
}
|
||||
|
||||
{
|
||||
size_t actual_sources{0};
|
||||
auto sources_it = sources.begin();
|
||||
auto sources_end = sources.end();
|
||||
|
||||
/* Initialize the buffer */
|
||||
while(sources_it != sources_end) {
|
||||
auto source = *sources_it;
|
||||
sources_it++;
|
||||
|
||||
if(source->pop_samples(this->chunk_buffer, chunk_local_sample_count)) {
|
||||
/* Chunk buffer initialized */
|
||||
actual_sources++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!actual_sources) {
|
||||
/* We don't have any sources. Just provide silence */
|
||||
sources_lock.unlock();
|
||||
goto zero_chunk_exit;
|
||||
}
|
||||
|
||||
/* Lets merge the rest */
|
||||
uint8_t temp_chunk_buffer[kTempChunkBufferSize];
|
||||
assert(kTempChunkBufferSize >= chunk_local_sample_count * this->channel_count_ * sizeof(float));
|
||||
|
||||
while(sources_it != sources_end) {
|
||||
auto source = *sources_it;
|
||||
sources_it++;
|
||||
|
||||
if(!source->pop_samples(temp_chunk_buffer, chunk_local_sample_count)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
actual_sources++;
|
||||
merge::merge_sources(this->chunk_buffer, this->chunk_buffer, temp_chunk_buffer, this->channel_count_, chunk_local_sample_count);
|
||||
}
|
||||
}
|
||||
sources_lock.unlock();
|
||||
|
||||
if(this->volume_modifier == 0) {
|
||||
goto zero_chunk_exit;
|
||||
} else {
|
||||
audio::apply_gain(this->chunk_buffer, this->channel_count_, chunk_local_sample_count, this->volume_modifier);
|
||||
}
|
||||
|
||||
/* Lets resample our chunk with our sample rate up/down to the device sample rate */
|
||||
if(this->resampler_) {
|
||||
this->chunk_buffer_sample_length = this->resampler_->estimated_output_size(chunk_local_sample_count);
|
||||
this->ensure_chunk_buffer_space(this->chunk_buffer_sample_length);
|
||||
|
||||
if(!this->resampler_->process(this->chunk_buffer, this->chunk_buffer, chunk_local_sample_count, this->chunk_buffer_sample_length)) {
|
||||
log_error(category::audio, tr("Failed to resample audio output."));
|
||||
goto zero_chunk_exit;
|
||||
}
|
||||
|
||||
if(!this->chunk_buffer_sample_length) {
|
||||
/* We need more input to create some resampled output */
|
||||
log_warn(category::audio, tr("Audio output resampling returned zero samples"));
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
this->chunk_buffer_sample_length = chunk_local_sample_count;
|
||||
}
|
||||
|
||||
/* Increase/decrease channel count */
|
||||
if(this->channel_count_ != this->current_output_channels) {
|
||||
if(!merge::merge_channels_interleaved(this->chunk_buffer, this->current_output_channels, this->chunk_buffer, this->channel_count_, this->chunk_buffer_sample_length)) {
|
||||
log_error(category::audio, tr("Failed to adjust channel count for audio output."));
|
||||
goto zero_chunk_exit;
|
||||
}
|
||||
}
|
||||
|
||||
if(!original_output) {
|
||||
this->sources_.erase(std::remove_if(this->sources_.begin(), this->sources_.end(), [&](const std::weak_ptr<AudioOutputSource>& weak_source) {
|
||||
auto source = weak_source.lock();
|
||||
if(!source) {
|
||||
return true;
|
||||
{
|
||||
std::unique_lock processor_lock{this->processors_mutex};
|
||||
auto processors = this->audio_processors_;
|
||||
processor_lock.unlock();
|
||||
|
||||
if(!processors.empty()) {
|
||||
float temp_chunk_buffer[kTempChunkBufferSize / sizeof(float)];
|
||||
assert(kTempChunkBufferSize >= this->current_output_channels * this->chunk_buffer_sample_length * sizeof(float));
|
||||
|
||||
audio::deinterleave(temp_chunk_buffer, (const float*) this->chunk_buffer, this->current_output_channels, this->chunk_buffer_sample_length);
|
||||
webrtc::StreamConfig stream_config{(int) this->playback_->sample_rate(), this->current_output_channels};
|
||||
|
||||
float* channel_ptr[kMaxChannelCount];
|
||||
for(size_t channel{0}; channel < this->current_output_channels; channel++) {
|
||||
channel_ptr[channel] = temp_chunk_buffer + (channel * this->chunk_buffer_sample_length);
|
||||
}
|
||||
|
||||
source->pop_samples(nullptr, local_frame_count);
|
||||
return false;
|
||||
}), this->sources_.end());
|
||||
return;
|
||||
} else if(this->volume_modifier <= 0) {
|
||||
this->sources_.erase(std::remove_if(this->sources_.begin(), this->sources_.end(), [&](const std::weak_ptr<AudioOutputSource>& weak_source) {
|
||||
auto source = weak_source.lock();
|
||||
if(!source) {
|
||||
return true;
|
||||
for(const std::shared_ptr<AudioProcessor>& processor : processors) {
|
||||
processor->analyze_reverse_stream(channel_ptr, stream_config);
|
||||
}
|
||||
|
||||
source->pop_samples(nullptr, local_frame_count);
|
||||
return false;
|
||||
}), this->sources_.end());
|
||||
|
||||
memset(output, 0, local_frame_count * out_channels * sizeof(float));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const size_t local_buffer_length = local_frame_count * 4 * this->channel_count_;
|
||||
const size_t out_buffer_length = out_frame_count * 4 * this->channel_count_;
|
||||
size_t sources = 0;
|
||||
size_t actual_sources;
|
||||
|
||||
{
|
||||
lock_guard sources_lock{this->sources_mutex};
|
||||
sources = this->sources_.size();
|
||||
|
||||
if(sources > 0) {
|
||||
/* allocate the required space */
|
||||
const auto required_source_buffer_length = (out_buffer_length > local_buffer_length ? out_buffer_length : local_buffer_length) * sources; /* ensure enough space for later resample */
|
||||
const auto required_source_merge_buffer_length = sizeof(void*) * sources;
|
||||
|
||||
{
|
||||
|
||||
if(this->source_buffer_length < required_source_buffer_length || !this->source_buffer) {
|
||||
if(this->source_buffer) {
|
||||
free(this->source_buffer);
|
||||
}
|
||||
|
||||
this->source_buffer = malloc(required_source_buffer_length);
|
||||
this->source_buffer_length = required_source_buffer_length;
|
||||
}
|
||||
if(this->source_merge_buffer_length < required_source_merge_buffer_length || !this->source_merge_buffer) {
|
||||
if (this->source_merge_buffer) {
|
||||
free(this->source_merge_buffer);
|
||||
}
|
||||
|
||||
this->source_merge_buffer = (void **) malloc(required_source_merge_buffer_length);
|
||||
this->source_merge_buffer_length = required_source_merge_buffer_length;
|
||||
}
|
||||
}
|
||||
|
||||
size_t index{0};
|
||||
this->sources_.erase(std::remove_if(this->sources_.begin(), this->sources_.end(), [&](const std::weak_ptr<AudioOutputSource>& weak_source) {
|
||||
auto source = weak_source.lock();
|
||||
if(!source) {
|
||||
return true;
|
||||
}
|
||||
|
||||
this->source_merge_buffer[index] = (char*) this->source_buffer + (local_buffer_length * index);
|
||||
if(!source->pop_samples(this->source_merge_buffer[index], local_frame_count)) {
|
||||
this->source_merge_buffer[index] = nullptr;
|
||||
return false;
|
||||
}
|
||||
|
||||
index++;
|
||||
return false;
|
||||
}), this->sources_.end());
|
||||
actual_sources = index;
|
||||
} else {
|
||||
goto clear_buffer_exit;
|
||||
}
|
||||
}
|
||||
|
||||
if(actual_sources > 0) {
|
||||
if(local_frame_count == out_frame_count) {
|
||||
/* Output */
|
||||
if(!merge::merge_n_sources(output, this->source_merge_buffer, actual_sources, this->channel_count_, local_frame_count)) {
|
||||
log_warn(category::audio, tr("failed to merge buffers!"));
|
||||
}
|
||||
} else {
|
||||
if(!merge::merge_n_sources(this->source_buffer, this->source_merge_buffer, actual_sources, this->channel_count_, local_frame_count)) {
|
||||
log_warn(category::audio, tr("failed to merge buffers!"));
|
||||
}
|
||||
|
||||
/* this->source_buffer could hold the amount of resampled data (checked above) */
|
||||
auto resampled_samples = this->resampler_->process(this->source_buffer, this->source_buffer, local_frame_count);
|
||||
if(resampled_samples <= 0) {
|
||||
log_warn(category::audio, tr("Failed to resample audio data for client ({})"));
|
||||
goto clear_buffer_exit;
|
||||
}
|
||||
|
||||
if(resampled_samples != out_frame_count) {
|
||||
if((size_t) resampled_samples > out_frame_count) {
|
||||
const auto diff_length = resampled_samples - out_frame_count;
|
||||
log_warn(category::audio, tr("enqueuing {} samples"), diff_length);
|
||||
const auto overhead_buffer_offset = this->resample_overhead_samples * sizeof(float) * this->channel_count_;
|
||||
const auto diff_byte_length = diff_length * sizeof(float) * this->channel_count_;
|
||||
|
||||
if(this->resample_overhead_buffer_length < diff_byte_length + overhead_buffer_offset) {
|
||||
this->resample_overhead_buffer_length = diff_byte_length + overhead_buffer_offset;
|
||||
auto new_buffer = malloc(this->resample_overhead_buffer_length);
|
||||
if(this->resample_overhead_buffer)
|
||||
memcpy(new_buffer, this->resample_overhead_buffer, overhead_buffer_offset);
|
||||
free(this->resample_overhead_buffer);
|
||||
this->resample_overhead_buffer = new_buffer;
|
||||
}
|
||||
memcpy(
|
||||
(char*) this->resample_overhead_buffer + overhead_buffer_offset,
|
||||
(char*) this->source_buffer + out_frame_count * sizeof(float) * this->channel_count_,
|
||||
diff_byte_length
|
||||
);
|
||||
this->resample_overhead_samples += diff_length;
|
||||
} else {
|
||||
log_warn(category::audio, tr("Resampled samples does not match requested sampeles: {} <> {}. Sampled from {} to {}"), resampled_samples, out_frame_count, this->resampler_->input_rate(), this->resampler_->output_rate());
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(output, this->source_buffer, out_frame_count * sizeof(float) * this->channel_count_);
|
||||
}
|
||||
|
||||
/* lets apply the volume */
|
||||
audio::apply_gain(output, this->channel_count_, out_frame_count, this->volume_modifier);
|
||||
} else {
|
||||
clear_buffer_exit:
|
||||
memset(output, 0, this->channel_count_ * sizeof(float) * out_frame_count);
|
||||
return;
|
||||
}
|
||||
return;
|
||||
zero_chunk_exit: {
|
||||
this->chunk_buffer_sample_length = (this->playback_->sample_rate() * kChunkTimeMs) / 1000;
|
||||
this->ensure_chunk_buffer_space(this->chunk_buffer_sample_length);
|
||||
memset(this->chunk_buffer, 0, this->chunk_buffer_sample_length * this->current_output_channels * sizeof(float));
|
||||
return;
|
||||
};
|
||||
}
|
||||
|
||||
void AudioOutput::set_device(const std::shared_ptr<AudioDevice> &new_device) {
|
||||
@ -558,6 +291,7 @@ bool AudioOutput::playback(std::string& error) {
|
||||
}
|
||||
}
|
||||
|
||||
this->ensure_chunk_buffer_space(0);
|
||||
this->playback_->register_source(this);
|
||||
return this->playback_->start(error);
|
||||
}
|
@ -5,6 +5,7 @@
|
||||
#include <memory>
|
||||
#include <iostream>
|
||||
#include <functional>
|
||||
#include <cassert>
|
||||
#include "./AudioSamples.h"
|
||||
#include "./driver/AudioDriver.h"
|
||||
#include "../ring_buffer.h"
|
||||
@ -16,6 +17,7 @@
|
||||
namespace tc::audio {
|
||||
class AudioOutput;
|
||||
class AudioResampler;
|
||||
class AudioProcessor;
|
||||
|
||||
enum struct OverflowStrategy {
|
||||
ignore,
|
||||
@ -91,6 +93,9 @@ namespace tc::audio {
|
||||
buffer{max_buffer_sample_count == -1 ? channel_count * sample_rate * sizeof(float) : max_buffer_sample_count * channel_count * sizeof(float)}
|
||||
{
|
||||
this->clear();
|
||||
|
||||
this->fadein_frame_samples_ = sample_rate * 0.02;
|
||||
this->fadeout_frame_samples_ = sample_rate * 0.016;
|
||||
}
|
||||
|
||||
size_t const channel_count_;
|
||||
@ -107,8 +112,8 @@ namespace tc::audio {
|
||||
* Fadeout and fadein properties.
|
||||
* The fadeout sample count should always be lower than the fade in sample count.
|
||||
*/
|
||||
size_t fadein_frame_samples_{960};
|
||||
size_t fadeout_frame_samples_{(size_t) (960 * .9)};
|
||||
size_t fadein_frame_samples_;
|
||||
size_t fadeout_frame_samples_;
|
||||
size_t fadeout_samples_left{0};
|
||||
|
||||
/* Methods below do not acquire the buffer_mutex mutex */
|
||||
@ -141,8 +146,15 @@ namespace tc::audio {
|
||||
|
||||
[[nodiscard]] inline float volume() const { return this->volume_modifier; }
|
||||
inline void set_volume(float value) { this->volume_modifier = value; }
|
||||
|
||||
void register_audio_processor(const std::shared_ptr<AudioProcessor>&);
|
||||
bool unregister_audio_processor(const std::shared_ptr<AudioProcessor>&);
|
||||
private:
|
||||
void fill_buffer(void *, size_t out_frame_count, size_t out_channels) override;
|
||||
/* One audio chunk should be 10ms long */
|
||||
constexpr static auto kChunkTimeMs{10};
|
||||
|
||||
void fill_buffer(void *, size_t request_sample_count, size_t out_channels) override;
|
||||
void fill_chunk_buffer();
|
||||
|
||||
size_t const channel_count_;
|
||||
size_t const sample_rate_;
|
||||
@ -150,23 +162,37 @@ namespace tc::audio {
|
||||
std::mutex sources_mutex{};
|
||||
std::deque<std::weak_ptr<AudioOutputSource>> sources_{};
|
||||
|
||||
std::mutex processors_mutex{};
|
||||
std::vector<std::shared_ptr<AudioProcessor>> audio_processors_{};
|
||||
|
||||
std::recursive_mutex device_lock{};
|
||||
std::shared_ptr<AudioDevice> device{nullptr};
|
||||
std::shared_ptr<AudioDevicePlayback> playback_{nullptr};
|
||||
std::unique_ptr<AudioResampler> resampler_{nullptr};
|
||||
|
||||
/* only access there buffers within the audio loop! */
|
||||
void* source_buffer{nullptr};
|
||||
void** source_merge_buffer{nullptr};
|
||||
size_t source_merge_buffer_length{0};
|
||||
|
||||
void* resample_overhead_buffer{nullptr};
|
||||
size_t resample_overhead_buffer_length{0};
|
||||
size_t resample_overhead_samples{0};
|
||||
/*
|
||||
* The chunk buffer will be large enough to hold
|
||||
* a chunk of pcm data with our current configuration.
|
||||
*/
|
||||
void* chunk_buffer{nullptr};
|
||||
size_t chunk_buffer_length{0};
|
||||
size_t chunk_buffer_sample_length{0};
|
||||
size_t chunk_buffer_sample_offset{0};
|
||||
|
||||
size_t source_buffer_length = 0;
|
||||
size_t source_merge_buffer_length = 0;
|
||||
size_t current_output_channels{0};
|
||||
|
||||
void ensure_chunk_buffer_space(size_t /* output samples */);
|
||||
void cleanup_buffers();
|
||||
|
||||
float volume_modifier{1.f};
|
||||
|
||||
[[nodiscard]] inline auto chunk_local_sample_count() const {
|
||||
assert(this->playback_);
|
||||
return (this->sample_rate_ * kChunkTimeMs) / 1000;
|
||||
}
|
||||
};
|
||||
}
|
native/serverconnection/src/audio/AudioOutputSource.cpp (new file, 312 lines)
@@ -0,0 +1,312 @@
|
||||
#include "./AudioOutput.h"
|
||||
#include "./AudioResampler.h"
|
||||
#include "../logger.h"
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
|
||||
using namespace std;
|
||||
using namespace tc;
|
||||
using namespace tc::audio;
|
||||
|
||||
void AudioOutputSource::clear() {
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
this->buffer.clear();
|
||||
this->buffer_state = BufferState::buffering;
|
||||
this->fadeout_samples_left = 0;
|
||||
}
|
||||
|
||||
void AudioOutputSource::apply_fadeout() {
|
||||
const auto samples_available = this->currently_buffered_samples();
|
||||
auto fade_samples = std::min(samples_available, this->fadeout_frame_samples_);
|
||||
if(!fade_samples) {
|
||||
this->fadeout_samples_left = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
const auto sample_byte_size = this->channel_count_ * sizeof(float) * fade_samples;
|
||||
assert(this->buffer.fill_count() >= sample_byte_size);
|
||||
auto write_ptr = (float*) ((char*) this->buffer.read_ptr() + (this->buffer.fill_count() - sample_byte_size));
|
||||
|
||||
for(size_t index{0}; index < fade_samples; index++) {
|
||||
const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
|
||||
const auto volume = std::min(log10f(offset) / -2.71828182845904f, 1.f);
|
||||
|
||||
for(int channel{0}; channel < this->channel_count_; channel++) {
|
||||
*write_ptr++ *= volume;
|
||||
}
|
||||
}
|
||||
|
||||
this->fadeout_samples_left = fade_samples;
|
||||
}
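Reading the loop above: for sample index i out of N fade samples, the gain applied is

    t      = (i + 1) / N
    volume = min(log10(t) / -e, 1),   with e ~ 2.71828 (the hard-coded constant)

so the gain stays clamped at 1 only while t <= 10^-e (about 0.002) and then falls logarithmically to 0 at t = 1. apply_fadein below mirrors this with log10(1 - t), ramping the gain from 0 up to 1 over the fade-in window. (Editorial note restating the code, not part of the commit.)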
void AudioOutputSource::apply_fadein() {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
const auto samples_available = this->currently_buffered_samples();
|
||||
auto fade_samples = std::min(samples_available - this->fadeout_samples_left, this->fadein_frame_samples_);
|
||||
if(!fade_samples) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: We're using the read_ptr() here in order to correctly apply the effect.
|
||||
* This isn't really best practice but works.
|
||||
*/
|
||||
auto write_ptr = (float*) this->buffer.read_ptr() + this->fadeout_samples_left * this->channel_count_;
|
||||
for(size_t index{0}; index < fade_samples; index++) {
|
||||
const auto offset = (float) ((float) (index + 1) / (float) fade_samples);
|
||||
const auto volume = std::min(log10f(1 - offset) / -2.71828182845904f, 1.f);
|
||||
|
||||
for(int channel{0}; channel < this->channel_count_; channel++) {
|
||||
*write_ptr++ *= volume;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool AudioOutputSource::pop_samples(void *target_buffer, size_t target_sample_count) {
|
||||
std::unique_lock buffer_lock{this->buffer_mutex};
|
||||
auto result = this->pop_samples_(target_buffer, target_sample_count);
|
||||
buffer_lock.unlock();
|
||||
|
||||
if(auto callback{this->on_read}; callback) {
|
||||
callback();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool AudioOutputSource::pop_samples_(void *target_buffer, size_t target_sample_count) {
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout: {
|
||||
/* Write as much we can */
|
||||
const auto write_samples = std::min(this->fadeout_samples_left, target_sample_count);
|
||||
const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
|
||||
/* Fill the rest with silence */
|
||||
const auto empty_samples = target_sample_count - write_samples;
|
||||
const auto empty_byte_size = empty_samples * this->channel_count_ * sizeof(float);
|
||||
memset((char*) target_buffer + write_byte_size, 0, empty_byte_size);
|
||||
|
||||
this->fadeout_samples_left -= write_samples;
|
||||
if(!this->fadeout_samples_left) {
|
||||
log_trace(category::audio, tr("{} Successfully replayed fadeout sequence."), (void*) this);
|
||||
this->buffer_state = BufferState::buffering;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::playing: {
|
||||
const auto buffered_samples = this->currently_buffered_samples();
|
||||
if(buffered_samples < target_sample_count + this->fadeout_frame_samples_) {
|
||||
const auto missing_samples = target_sample_count + this->fadeout_frame_samples_ - buffered_samples;
|
||||
if(auto callback{this->on_underflow}; callback) {
|
||||
if(callback(missing_samples)) {
|
||||
/* We've been filled up again. Trying again to fill the output buffer. */
|
||||
return this->pop_samples(target_buffer, target_sample_count);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* When consuming target_sample_count amount samples of our buffer we could not
|
||||
* apply the fadeout effect any more. Instead we're applying it now and returning to buffering state.
|
||||
*/
|
||||
this->apply_fadeout();
|
||||
|
||||
/* Write the rest of unmodified buffer */
|
||||
const auto write_samples = buffered_samples - this->fadeout_samples_left;
|
||||
assert(write_samples <= target_sample_count);
|
||||
const auto write_byte_size = write_samples * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
|
||||
log_trace(category::audio, tr("{} Starting stream fadeout. Requested samples {}, Buffered samples: {}, Fadeout frame samples: {}, Returned normal samples: {}"),
|
||||
(void*) this, target_sample_count, buffered_samples, this->fadeout_frame_samples_, write_samples
|
||||
);
|
||||
|
||||
this->buffer_state = BufferState::fadeout;
|
||||
if(write_samples < target_sample_count) {
|
||||
/* Fill the rest of the buffer with the fadeout content */
|
||||
this->pop_samples((char*) target_buffer + write_byte_size, target_sample_count - write_samples);
|
||||
}
|
||||
} else {
|
||||
/* We can just normally copy the buffer */
|
||||
const auto write_byte_size = target_sample_count * this->channel_count_ * sizeof(float);
|
||||
memcpy(target_buffer, this->buffer.read_ptr(), write_byte_size);
|
||||
this->buffer.advance_read_ptr(write_byte_size);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::buffering:
|
||||
/* Nothing to replay */
|
||||
return false;
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
ssize_t AudioOutputSource::enqueue_samples(const void *source_buffer, size_t sample_count) {
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
return this->enqueue_samples_(source_buffer, sample_count);
|
||||
}
|
||||
|
||||
ssize_t AudioOutputSource::enqueue_samples_(const void *source_buffer, size_t sample_count) {
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout:
|
||||
case BufferState::buffering: {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
assert(this->min_buffered_samples_ >= this->currently_buffered_samples() - this->fadeout_samples_left);
|
||||
const auto missing_samples = this->min_buffered_samples_ - (this->currently_buffered_samples() - this->fadeout_samples_left);
|
||||
const auto write_sample_count = std::min(missing_samples, sample_count);
|
||||
const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
|
||||
|
||||
assert(write_sample_count <= this->max_supported_buffering());
|
||||
memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
|
||||
this->buffer.advance_write_ptr(write_byte_size);
|
||||
|
||||
if(sample_count < missing_samples) {
|
||||
/* we still need to buffer */
|
||||
return sample_count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Even though we still have fadeout samples left we don't declare them as such since we've already filled
|
||||
* our future buffer.
|
||||
*/
|
||||
this->fadeout_samples_left = 0;
|
||||
|
||||
/* buffering finished */
|
||||
log_trace(category::audio, tr("{} Finished buffering {} samples. Fading them in."), (void*) this, this->min_buffered_samples_);
|
||||
this->apply_fadein();
|
||||
this->buffer_state = BufferState::playing;
|
||||
if(sample_count > missing_samples) {
|
||||
/* we've more data to write */
|
||||
return this->enqueue_samples((const char*) source_buffer + write_byte_size, sample_count - missing_samples) + write_sample_count;
|
||||
} else {
|
||||
return write_sample_count;
|
||||
}
|
||||
}
|
||||
|
||||
case BufferState::playing: {
|
||||
const auto buffered_samples = this->currently_buffered_samples();
|
||||
|
||||
const auto write_sample_count = std::min(this->max_supported_buffering() - buffered_samples, sample_count);
|
||||
const auto write_byte_size = write_sample_count * this->channel_count_ * sizeof(float);
|
||||
|
||||
memcpy(this->buffer.write_ptr(), source_buffer, write_byte_size);
|
||||
this->buffer.advance_write_ptr(write_byte_size);
|
||||
|
||||
if(write_sample_count < sample_count) {
|
||||
if(auto callback{this->on_overflow}; callback) {
|
||||
callback(sample_count - write_sample_count);
|
||||
}
|
||||
|
||||
switch (this->overflow_strategy) {
|
||||
case OverflowStrategy::discard_input:
|
||||
return -2;
|
||||
|
||||
case OverflowStrategy::discard_buffer_all:
|
||||
this->buffer.clear();
|
||||
break;
|
||||
|
||||
case OverflowStrategy::discard_buffer_half:
|
||||
/* FIXME: This implementation is wrong! */
|
||||
this->buffer.advance_read_ptr(this->buffer.fill_count() / 2);
|
||||
break;
|
||||
|
||||
case OverflowStrategy::ignore:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return write_sample_count;
|
||||
}
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr static auto kMaxStackBuffer{1024 * 8 * sizeof(float)};
|
||||
ssize_t AudioOutputSource::enqueue_samples_no_interleave(const void *source_buffer, size_t samples) {
|
||||
if(this->channel_count_ == 1) {
|
||||
return this->enqueue_samples(source_buffer, samples);
|
||||
} else if(this->channel_count_ == 2) {
|
||||
const auto buffer_byte_size = samples * this->channel_count_ * sizeof(float);
|
||||
if(buffer_byte_size > kMaxStackBuffer) {
|
||||
/* We can't convert to interleave */
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint8_t stack_buffer[kMaxStackBuffer];
|
||||
{
|
||||
auto src_buffer = (const float*) source_buffer;
|
||||
auto target_buffer = (float*) stack_buffer;
|
||||
|
||||
auto samples_to_write = samples;
|
||||
while (samples_to_write-- > 0) {
|
||||
*target_buffer = *src_buffer;
|
||||
*(target_buffer + 1) = *(src_buffer + samples);
|
||||
|
||||
target_buffer += 2;
|
||||
src_buffer++;
|
||||
}
|
||||
}
|
||||
|
||||
return this->enqueue_samples(stack_buffer, samples);
|
||||
} else {
|
||||
/* TODO: Generalize to interleave algo */
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool AudioOutputSource::set_max_buffered_samples(size_t samples) {
|
||||
samples = std::max(samples, (size_t) this->fadein_frame_samples_);
|
||||
if(samples > this->max_supported_buffering()) {
|
||||
samples = this->max_supported_buffering();
|
||||
}
|
||||
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
if(samples < this->min_buffered_samples_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
this->max_buffered_samples_ = samples;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AudioOutputSource::set_min_buffered_samples(size_t samples) {
|
||||
samples = std::max(samples, (size_t) this->fadein_frame_samples_);
|
||||
|
||||
std::lock_guard buffer_lock{this->buffer_mutex};
|
||||
if(samples > this->max_buffered_samples_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
this->min_buffered_samples_ = samples;
|
||||
switch(this->buffer_state) {
|
||||
case BufferState::fadeout:
|
||||
case BufferState::buffering: {
|
||||
assert(this->currently_buffered_samples() >= this->fadeout_samples_left);
|
||||
const auto buffered_samples = this->currently_buffered_samples() - this->fadeout_samples_left;
|
||||
if(buffered_samples > this->min_buffered_samples_) {
|
||||
log_trace(category::audio, tr("{} Finished buffering {} samples (due to min buffered sample reduce). Fading them in."), (void*) this, this->min_buffered_samples_);
|
||||
this->apply_fadein();
|
||||
this->buffer_state = BufferState::playing;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case BufferState::playing:
|
||||
return true;
|
||||
|
||||
default:
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
@ -4,19 +4,20 @@
|
||||
|
||||
using namespace tc::audio;
|
||||
|
||||
Reframer::Reframer(size_t channels, size_t frame_size) : _frame_size(frame_size), _channels(channels) {
|
||||
InputReframer::InputReframer(size_t channels, size_t frame_size) : _frame_size(frame_size), _channels(channels) {
|
||||
this->buffer = nullptr;
|
||||
this->_buffer_index = 0;
|
||||
}
|
||||
|
||||
Reframer::~Reframer() {
|
||||
InputReframer::~InputReframer() {
|
||||
if(this->buffer)
|
||||
free(this->buffer);
|
||||
}
|
||||
|
||||
void Reframer::process(const void *source, size_t samples) {
|
||||
if(!this->buffer)
|
||||
this->buffer = (float*) malloc(this->_channels * this->_frame_size * sizeof(float));
|
||||
void InputReframer::process(const void *source, size_t samples) {
|
||||
if(!this->buffer) {
|
||||
this->buffer = (float*) malloc(this->_channels * this->_frame_size * sizeof(float));
|
||||
}
|
||||
assert(this->on_frame);
|
||||
|
||||
if(this->_buffer_index > 0) {
|
||||
|
@ -4,15 +4,15 @@
#include <cstdio>

namespace tc::audio {
    class Reframer {
    class InputReframer {
        public:
            Reframer(size_t channels, size_t frame_size);
            virtual ~Reframer();
            InputReframer(size_t channels, size_t frame_size);
            virtual ~InputReframer();

            void process(const void* /* source */, size_t /* samples */);

            inline size_t channels() const { return this->_channels; }
            inline size_t frame_size() const { return this->_frame_size; }
            [[nodiscard]] inline size_t channels() const { return this->_channels; }
            [[nodiscard]] inline size_t frame_size() const { return this->_frame_size; }

            std::function<void(const void* /* buffer */)> on_frame;
        private:
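A short usage sketch for the renamed InputReframer (the header path, the 960-sample frame size and the lambda are made up; constructor, process() and on_frame are the members declared above):

#include "AudioReframer.h" /* assumed header name */

void reframe_example() {
    /* Regroup arbitrarily sized stereo chunks into fixed frames of 960 samples. */
    tc::audio::InputReframer reframer{2, 960};
    reframer.on_frame = [](const void *frame) {
        /* frame points to channels() * frame_size() float samples */
    };

    float chunk[2 * 480]{};
    reframer.process(chunk, 480); /* buffered only, no callback yet */
    reframer.process(chunk, 480); /* completes one frame, on_frame fires once */
}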
@ -21,8 +21,8 @@ AudioResampler::~AudioResampler() {
    soxr_delete(this->soxr_handle);
}

ssize_t AudioResampler::process(void *output, const void *input, size_t input_length) {
    if(this->io_ratio() == 1) {
bool AudioResampler::process(void *output, const void *input, size_t input_length, size_t& output_length) {
    if(this->output_rate_ == this->input_rate_) {
        if(input != output) {
            memcpy(output, input, input_length * this->channels_ * 4);
        }
@ -31,15 +31,15 @@ ssize_t AudioResampler::process(void *output, const void *input, size_t input_le
    }

    if(!this->soxr_handle) {
        return -2;
        return false;
    }

    size_t output_length = 0;
    auto error = soxr_process(this->soxr_handle, input, input_length, nullptr, output, this->estimated_output_size(input_length), &output_length);
    assert(output_length > 0);
    auto error = soxr_process(this->soxr_handle, input, input_length, nullptr, output, output_length, &output_length);
    if(error) {
        log_error(category::audio, tr("Failed to process resample: {}"), error);
        return -1;
        return false;
    }

    return output_length;
    return true;
}
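A usage sketch for the changed process() signature (buffer names are illustrative; the pattern of seeding output_length from estimated_output_size() and reading back the produced count mirrors the call sites updated later in this commit):

bool resample_chunk(tc::audio::AudioResampler &resampler, const float *input, size_t input_samples, float *output) {
    /* in: capacity of `output` in samples, out: samples actually produced */
    size_t output_samples = resampler.estimated_output_size(input_samples);
    if(!resampler.process(output, input, input_samples, output_samples)) {
        return false; /* missing soxr handle or soxr reported an error */
    }

    /* output_samples now holds the real amount, which may legitimately be zero */
    return true;
}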
@ -31,7 +31,7 @@ namespace tc::audio {

            [[nodiscard]] inline bool valid() { return this->io_ratio() == 1 || this->soxr_handle != nullptr; }

            [[nodiscard]] ssize_t process(void* /* output */, const void* /* input */, size_t /* input length */);
            [[nodiscard]] bool process(void* /* output */, const void* /* input */, size_t /* input length */, size_t& /* output length */);
        private:
            size_t const channels_{0};
            size_t const input_rate_{0};
@ -181,7 +181,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_write_data_rated) {
    }

    //TODO: Use a tmp preallocated buffer here!
    ssize_t target_samples = client->_resampler->estimated_output_size(samples);
    size_t target_samples = client->_resampler->estimated_output_size(samples);
    auto buffer = SampleBuffer::allocate((uint8_t) handle->channel_count(), max((uint16_t) samples, (uint16_t) target_samples));
    auto source_buffer = js_buffer.Data();
    if(!interleaved) {
@ -199,8 +199,7 @@ NAN_METHOD(AudioOutputStreamWrapper::_write_data_rated) {
        source_buffer = buffer->sample_data;
    }

    target_samples = client->_resampler->process(buffer->sample_data, source_buffer, samples);
    if(target_samples < 0) {
    if(!client->_resampler->process(buffer->sample_data, source_buffer, samples, target_samples)) {
        Nan::ThrowError("Resampling failed");
        return;
    }
@ -0,0 +1,11 @@
//
// Created by WolverinDEV on 27/03/2021.
//

#include "AudioProcessor.h"

using namespace tc::audio;

void AudioProcessor::analyze_reverse_stream(const float *const *data, const webrtc::StreamConfig &reverse_config) {

}
@ -0,0 +1,19 @@
#pragma once

#include <memory>
#include <modules/audio_processing/include/audio_processing.h>

namespace tc::audio {
    class AudioProcessor : public std::enable_shared_from_this<AudioProcessor> {
        public:

            /**
             * Accepts deinterleaved float audio with the range [-1, 1]. Each element
             * of |data| points to a channel buffer, arranged according to
             * |reverse_config|.
             */
            void analyze_reverse_stream(const float* const* data,
                                        const webrtc::StreamConfig& reverse_config);
        private:
    };
}
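A call-shape sketch for analyze_reverse_stream (sample rate, channel count and frame size are assumptions; the deinterleaved layout and the webrtc::StreamConfig argument follow the declaration above):

void feed_reverse_stream(const std::shared_ptr<tc::audio::AudioProcessor> &processor) {
    constexpr int sample_rate = 48000;                 /* assumed */
    constexpr size_t channel_count = 2;                /* assumed */
    constexpr size_t frame_size = sample_rate / 100;   /* 10 ms, the usual WebRTC chunk */

    float left[frame_size]{};
    float right[frame_size]{};
    const float *channels[channel_count] = {left, right}; /* one pointer per channel, samples in [-1, 1] */

    webrtc::StreamConfig reverse_config{sample_rate, channel_count};
    processor->analyze_reverse_stream(channels, reverse_config);
}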
@ -95,8 +95,9 @@ namespace tc::audio::sounds {
    }

    if(!global_audio_output) {
        if(auto callback{this->settings_.callback}; callback)
        if(auto callback{this->settings_.callback}; callback) {
            callback(PlaybackResult::SOUND_NOT_INITIALIZED, "");
        }
        this->finalize(false);
        return;
    }
@ -106,18 +107,24 @@ namespace tc::audio::sounds {
        auto max_samples = (size_t)
                std::max(this->output_source->sample_rate(), this->file_handle->sample_rate()) * kBufferChunkTimespan * 8 *
                std::max(this->file_handle->channels(), this->output_source->channel_count());

        this->cache_buffer = ::malloc((size_t) (max_samples * sizeof(float)));

        if(!this->cache_buffer) {
            if(auto callback{this->settings_.callback}; callback)
            if(auto callback{this->settings_.callback}; callback) {
                callback(PlaybackResult::PLAYBACK_ERROR, "failed to allocate cached buffer");
            }

            this->finalize(false);
            return;
        }
        this->state_ = PLAYER_STATE_PLAYING;
    }

    if(this->state_ == PLAYER_STATE_PLAYING) {
        if(!this->could_enqueue_next_buffer()) return;
        if(!this->could_enqueue_next_buffer()) {
            return;
        }

        auto samples_to_read = (size_t) (this->file_handle->sample_rate() * kBufferChunkTimespan);
        auto errc = this->file_handle->read(this->cache_buffer, &samples_to_read);
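A worked sizing example for the cache buffer allocated above (kBufferChunkTimespan's value is not visible in this hunk, so the 0.1 s figure below is purely an assumption; the formula itself is the one in the code):

#include <algorithm>

constexpr double assumed_chunk_timespan = 0.1;             /* seconds, assumed */
constexpr size_t file_rate = 44100, output_rate = 48000;   /* example rates */
constexpr size_t channel_count = 2;                        /* file and output both stereo here */

/* max_samples = max(sample rates) * timespan * 8 * max(channel counts) */
constexpr auto example_max_samples = (size_t) (std::max(file_rate, output_rate) * assumed_chunk_timespan * 8 * channel_count);
constexpr auto example_cache_bytes = example_max_samples * sizeof(float); /* 76800 samples -> 300 KiB */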
@ -143,9 +150,9 @@ namespace tc::audio::sounds {
            return;
        }

        auto resampled_samples = this->resampler->process(this->cache_buffer, this->cache_buffer, samples_to_read);
        if(resampled_samples <= 0) {
            log_warn(category::audio, tr("failed to resample file audio buffer ({})"), resampled_samples);
        size_t resampled_samples{this->cache_buffer_sample_size()};
        if(!this->resampler->process(this->cache_buffer, this->cache_buffer, samples_to_read, resampled_samples)) {
            log_warn(category::audio, tr("failed to resample file audio buffer."));
            return;
        }

@ -218,7 +225,9 @@ namespace tc::audio::sounds {

        const auto current_size = this->output_source->currently_buffered_samples();
        const auto max_size = this->output_source->max_buffered_samples();
        if(current_size > max_size) return false;
        if(current_size > max_size) {
            return false;
        }

        const auto size_left = max_size - current_size;
        return size_left >= this->cache_buffer_sample_size() * 1.5; /* ensure we've a bit more space */
@ -21,6 +21,9 @@
#include "audio/AudioEventLoop.h"
#include "audio/sounds/SoundPlayer.h"

//#include <webrtc-audio-processing-1/modules/audio_processing/include/audio_processing.h>
#include <modules/audio_processing/include/audio_processing.h>

#ifndef WIN32
#include <unistd.h>
#endif
@ -37,6 +40,14 @@ using namespace tc;
using namespace tc::connection;
using namespace tc::ft;

void processor() {
    webrtc::AudioProcessingBuilder builder{};
    webrtc::AudioProcessing::Config config{};

    auto processor = builder.Create();
    //processor->AnalyzeReverseStream()
}

void testTomMath(){
    mp_int x{};
    mp_init(&x);
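The processor() experiment above only builds the module; a hedged sketch of how the builder and config objects are usually wired together in the webrtc-audio-processing API (which sub-modules to enable, and what is done with the instance afterwards, are assumptions rather than anything this commit decides):

void configure_audio_processing() {
    webrtc::AudioProcessingBuilder builder{};
    auto apm = builder.Create(); /* ownership/return type depends on the library version */

    webrtc::AudioProcessing::Config config{};
    config.echo_canceller.enabled = true;     /* assumed sub-module selection */
    config.noise_suppression.enabled = true;
    config.high_pass_filter.enabled = true;
    apm->ApplyConfig(config);
}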
@ -157,10 +168,12 @@ NAN_MODULE_INIT(init) {
        }
    }

    /* TODO: Test error codes and make the audi playback device configurable */
    global_audio_output = new tc::audio::AudioOutput(2, 44100);
    /* TODO: Make the sample rate configurable! */
    /* Adjusting the sample rate works flawlessly (tested from 4000 to 96000) */
    global_audio_output = new tc::audio::AudioOutput(2, 48000);
    global_audio_output->set_device(default_output);
    if(!global_audio_output->playback(error)) {
        /* TODO: Better impl of error handling */
        logger::error(category::audio, "Failed to start audio playback: {}", error);
    }
});
@ -196,12 +196,17 @@ void VoiceSender::encode_raw_frame(const std::unique_ptr<AudioFrame> &frame) {
        return;
    }

    auto resampled_samples = codec_data->resampler->process(this->_buffer, this->_buffer, merged_channel_byte_size / codec_channels / 4);
    if(resampled_samples <= 0) {
        log_error(category::voice_connection, tr("Resampler returned {}"), resampled_samples);
    auto resampled_samples{estimated_resampled_byte_size / frame->channels / sizeof(float)};
    if(!codec_data->resampler->process(this->_buffer, this->_buffer, merged_channel_byte_size / codec_channels / 4, resampled_samples)) {
        log_error(category::voice_connection, tr("Failed to resample buffer."), resampled_samples);
        return;
    }

    if(!resampled_samples) {
        /* we don't have any data */
        return;
    }

    if(resampled_samples * codec_channels * 4 != codec_data->converter->bytes_per_frame()) {
        log_error(category::voice_connection,
                  tr("Could not encode audio frame. Frame length is not equal to code frame length! Codec: {}, Packet: {}"),
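The repeated `merged_channel_byte_size / codec_channels / 4` expression converts a byte count of interleaved 32-bit float audio into samples per channel; a tiny named helper states that intent (the function is invented for illustration only):

/* Samples per channel contained in `byte_size` bytes of interleaved float audio. */
constexpr size_t float_bytes_to_samples(size_t byte_size, size_t channel_count) {
    return byte_size / channel_count / sizeof(float); /* sizeof(float) == 4, as hard-coded above */
}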
@ -697,9 +697,7 @@ void VoiceClient::event_execute(const std::chrono::system_clock::time_point &sch

    //TODO: Use statically allocated buffer?
    auto decoded = this->decode_buffer(audio_codec.codec, replay_head->buffer, false);
    if(!decoded) {
        log_warn(category::audio, tr("Failed to decode buffer for client {} (nullptr). Dropping buffer."), this->client_id_, error);
    } else {
    if(decoded) {
        if(is_new_audio_stream) {
            log_warn(category::audio, tr("New audio chunk for client {}"), this->client_id_);

@ -797,17 +795,23 @@ std::shared_ptr<audio::SampleBuffer> VoiceClient::decode_buffer(const codec::val
        return nullptr;
    }

    if(TEMP_BUFFER_LENGTH < codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count() * sizeof(float)) {
    auto estimated_output_samples = codec_data.resampler->estimated_output_size(samples);
    if(TEMP_BUFFER_LENGTH < estimated_output_samples * this->output_source->channel_count() * sizeof(float)) {
        log_warn(category::voice_connection, tr("Failed to resample audio data. Target buffer is smaller then expected bytes ({} < {})"), TEMP_BUFFER_LENGTH, (codec_data.resampler->estimated_output_size(samples) * this->output_source->channel_count() * 4));
        return nullptr;
    }

    auto resampled_samples = codec_data.resampler->process(target_buffer, target_buffer, samples);
    if(resampled_samples <= 0) {
        log_warn(category::voice_connection, tr("Failed to resample audio data. Resampler resulted in {}"), resampled_samples);
    auto resampled_samples{estimated_output_samples};
    if(!codec_data.resampler->process(target_buffer, target_buffer, samples, resampled_samples)) {
        log_warn(category::voice_connection, tr("Failed to resample audio data."));
        return nullptr;
    }

    if(!resampled_samples) {
        /* we don't seem to have any output samples */
        return nullptr;
    }

    audio::apply_gain(target_buffer, this->output_source->channel_count(), resampled_samples, this->volume_);
    auto audio_buffer = audio::SampleBuffer::allocate((uint8_t) this->output_source->channel_count(), (uint16_t) resampled_samples);

@ -2,7 +2,7 @@
// Created by wolverindev on 07.02.20.
//

#include "ring_buffer.h"
#include "./ring_buffer.h"
#include <cassert>

#ifdef HAVE_SOUNDIO
@ -78,12 +78,13 @@ namespace tc {
#endif

    static inline size_t ceil_dbl_to_size_t(double x) {
        const auto truncation = (double) x;
        return (size_t) (truncation + (truncation < x));
        const auto truncation = (size_t) x;
        return truncation + (truncation < x);
    }

    ring_buffer::ring_buffer(size_t min_capacity) {
        this->allocate_memory(min_capacity);
        auto result = this->allocate_memory(min_capacity);
        assert(result);
    }

    ring_buffer::~ring_buffer() {
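A worked example of the corrected rounding above (the 4 KiB page size is an assumption for illustration; soundio_os_page_size() supplies the real value at runtime):

#include <cstddef>

constexpr std::size_t ceil_div(std::size_t value, std::size_t divisor) {
    return (value + divisor - 1) / divisor; /* integer equivalent of the double-based ceiling above */
}

static_assert(ceil_div(5000, 4096) * 4096 == 8192,
              "a 5000 byte request rounds up to two 4 KiB pages");
/* The removed variant truncated to double instead of size_t, so the `truncation < x`
 * carry never fired and the same request would have been rounded down to 4096 bytes. */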
@ -91,7 +92,8 @@ namespace tc {
    }

    bool ring_buffer::allocate_memory(size_t requested_capacity) {
        size_t actual_capacity = ceil_dbl_to_size_t(requested_capacity / (double) soundio_os_page_size()) * soundio_os_page_size();
        size_t actual_capacity = ceil_dbl_to_size_t((double) requested_capacity / (double) soundio_os_page_size()) * soundio_os_page_size();
        assert(actual_capacity > 0);

        this->memory.address = nullptr;
#ifdef WIN32
@ -125,8 +127,7 @@ namespace tc {
            }
        }

        char *addr2 = (char*)MapViewOfFileEx(hMapFile, FILE_MAP_WRITE, 0, 0,
                                             actual_capacity, address + actual_capacity);
        char *addr2 = (char*)MapViewOfFileEx(hMapFile, FILE_MAP_WRITE, 0, 0, actual_capacity, address + actual_capacity);
        if (addr2 != address + actual_capacity) {
            ok = UnmapViewOfFile(addr1);
            assert(ok);
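The two MapViewOfFileEx calls map the same file mapping twice, back to back, so the second view mirrors the first. A sketch of why that pays off (struct and field names are invented; only the mirroring idea comes from the code above):

#include <cstddef>
#include <cstring>

struct mirrored_buffer {
    char *base;        /* first mapping; base + capacity starts the mirrored second mapping */
    size_t capacity;   /* a multiple of the page size, as computed in allocate_memory() */
};

/* Bytes [capacity, 2 * capacity) alias bytes [0, capacity), so a read that crosses
 * the wrap point needs no splitting into two memcpy calls. */
void read_wrapping(const mirrored_buffer &buffer, size_t read_offset, void *target, size_t length) {
    std::memcpy(target, buffer.base + (read_offset % buffer.capacity), length); /* requires length <= capacity */
}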