import {
    AbstractVoiceConnection,
    VoiceConnectionStatus,
    WhisperSessionInitializer
} from "tc-shared/connection/VoiceConnection";
import {RecorderProfile} from "tc-shared/voice/RecorderProfile";
import {NativeServerConnection, NativeVoiceClient, NativeVoiceConnection, PlayerState} from "tc-native/connection";
import {ServerConnection} from "./ServerConnection";
import {VoiceClient} from "tc-shared/voice/VoiceClient";
import {WhisperSession, WhisperTarget} from "tc-shared/voice/VoiceWhisper";
import {NativeInput} from "../audio/AudioRecorder";
import {ConnectionState} from "tc-shared/ConnectionHandler";
import {VoicePlayerEvents, VoicePlayerLatencySettings, VoicePlayerState} from "tc-shared/voice/VoicePlayer";
import {Registry} from "tc-shared/events";
import {LogCategory, logDebug, logInfo, logWarn} from "tc-shared/log";
import {tr} from "tc-shared/i18n/localize";
import {ConnectionStatistics} from "tc-shared/connection/ConnectionBase";
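/**
 * Voice connection implementation backed by the native voice backend (tc-native/connection).
 * It mirrors the server connection state onto the voice connection state, forwards the
 * attached recorder as the native audio source and manages per-client playback wrappers.
 */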
export class NativeVoiceConnectionWrapper extends AbstractVoiceConnection {
    private readonly serverConnectionStateChangedListener;
    private readonly native: NativeVoiceConnection;

    private localAudioStarted = false;
    private connectionState: VoiceConnectionStatus;
    private currentRecorder: RecorderProfile;

    private registeredVoiceClients: {[key: number]: NativeVoiceClientWrapper} = {};

    private currentlyReplayingAudio = false;
    private readonly voiceClientStateChangedEventListener;
    constructor(connection: ServerConnection, voice: NativeVoiceConnection) {
        super(connection);
        this.native = voice;

        this.serverConnectionStateChangedListener = () => {
            if(this.connection.getConnectionState() === ConnectionState.CONNECTED) {
                this.setConnectionState(VoiceConnectionStatus.Connected);
            } else {
                this.setConnectionState(VoiceConnectionStatus.Disconnected);
            }
        };

        this.connection.events.on("notify_connection_state_changed", this.serverConnectionStateChangedListener);
        this.connectionState = VoiceConnectionStatus.Disconnected;

        this.voiceClientStateChangedEventListener = this.handleVoiceClientStateChange.bind(this);
    }
    destroy() {
        this.connection.events.off("notify_connection_state_changed", this.serverConnectionStateChangedListener);
    }

    getConnectionState(): VoiceConnectionStatus {
        return this.connectionState;
    }

    getFailedMessage(): string {
        /* the native voice connection can't fail */
        return "this message should never appear";
    }

    private setConnectionState(state: VoiceConnectionStatus) {
        if(this.connectionState === state) {
            return;
        }

        const oldState = this.connectionState;
        this.connectionState = state;
        this.events.fire("notify_connection_status_changed", { oldStatus: oldState, newStatus: state });
    }

    encodingSupported(codec: number): boolean {
        return this.native.encoding_supported(codec);
    }

    decodingSupported(codec: number): boolean {
        return this.native.decoding_supported(codec);
    }
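    /**
     * Attach the given recorder profile to this voice connection.
     * Any previously attached recorder is unmounted first and the native audio source is
     * cleared; the new recorder must be backed by a NativeInput.
     */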
    async acquireVoiceRecorder(recorder: RecorderProfile | undefined): Promise<void> {
        if(this.currentRecorder === recorder) {
            return;
        }

        if(this.currentRecorder) {
            this.currentRecorder.callback_unmount = undefined;
            this.native.set_audio_source(undefined);

            this.handleVoiceEndEvent();
            await this.currentRecorder.unmount();
            this.currentRecorder = undefined;
        }

        await recorder?.unmount();
        this.currentRecorder = recorder;

        try {
            if(recorder) {
                if(!(recorder.input instanceof NativeInput)) {
                    this.currentRecorder = undefined;
                    throw "Recorder input must be an instance of NativeInput!";
                }

                recorder.current_handler = this.connection.client;
                recorder.callback_unmount = () => {
                    logDebug(LogCategory.VOICE, tr("Lost voice recorder..."));
                    this.acquireVoiceRecorder(undefined);
                };

                recorder.callback_start = this.handleVoiceStartEvent.bind(this);
                recorder.callback_stop = this.handleVoiceEndEvent.bind(this);

                this.native.set_audio_source(recorder.input.getNativeConsumer());
            }
        } catch(error) {
            this.currentRecorder = undefined;
            throw error;
        }
        this.events.fire("notify_recorder_changed", {});
    }

    voiceRecorder(): RecorderProfile {
        return this.currentRecorder;
    }

    getEncoderCodec(): number {
        return this.native.get_encoder_codec();
    }

    setEncoderCodec(codec: number) {
        this.native.set_encoder_codec(codec);
    }

    isReplayingVoice(): boolean {
        return this.currentlyReplayingAudio;
    }

    private setReplayingVoice(status: boolean) {
        if(status === this.currentlyReplayingAudio) {
            return;
        }
        this.currentlyReplayingAudio = status;
        this.events.fire("notify_voice_replay_state_change", { replaying: status });
    }
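    /**
     * Recomputes the replaying flag whenever a registered voice client changes its playback
     * state: we're replaying as long as at least one client is playing or buffering.
     */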
    private handleVoiceClientStateChange() {
        this.setReplayingVoice(this.availableVoiceClients().findIndex(client => client.getState() === VoicePlayerState.PLAYING || client.getState() === VoicePlayerState.BUFFERING) !== -1);
    }
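    /** Invoked by the recorder when local voice input starts; enables voice sending on the native backend. */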
    private handleVoiceStartEvent() {
        const chandler = this.connection.client;
        if(chandler.isMicrophoneMuted()) {
            logWarn(LogCategory.VOICE, tr("Received local voice started event, even though we're muted!"));
            return;
        }

        this.native.enable_voice_send(true);
        this.localAudioStarted = true;
        logInfo(LogCategory.VOICE, tr("Local voice started"));

        const ch = chandler.getClient();
        if(ch) ch.speaking = true;
    }
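    /** Invoked by the recorder when local voice input stops; disables voice sending and clears the speaking flag. */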
    private handleVoiceEndEvent() {
        this.native.enable_voice_send(false);

        const chandler = this.connection.client;
        const ch = chandler.getClient();
        if(ch) ch.speaking = false;

        if(!chandler.connected)
            return false;

        if(chandler.isMicrophoneMuted())
            return false;

        logInfo(LogCategory.VOICE, tr("Local voice ended"));
        this.localAudioStarted = false;
    }

    availableVoiceClients(): NativeVoiceClientWrapper[] {
        return Object.keys(this.registeredVoiceClients).map(clientId => this.registeredVoiceClients[clientId]);
    }
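    /**
     * Registers a playback client for the given client id with the native backend and
     * subscribes to its state changes so the replaying-voice flag stays up to date.
     */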
    registerVoiceClient(clientId: number) {
        const client = new NativeVoiceClientWrapper(this.native.register_client(clientId));
        client.events.on("notify_state_changed", this.voiceClientStateChangedEventListener);
        this.registeredVoiceClients[clientId] = client;
        return client;
    }
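    /** Removes a previously registered playback client and releases its native resources. */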
    unregisterVoiceClient(client: VoiceClient) {
        if(!(client instanceof NativeVoiceClientWrapper))
            throw "invalid client type";

        delete this.registeredVoiceClients[client.getClientId()];
        this.native.unregister_client(client.getClientId());
        client.destroy();
    }

    stopAllVoiceReplays() {
        this.availableVoiceClients().forEach(client => client.abortReplay());
    }

    /* whisper API */
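    /* Whispering is not implemented for the native backend; the methods below are no-op stubs. */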
    getWhisperSessionInitializer(): WhisperSessionInitializer | undefined {
        return undefined;
    }

    getWhisperSessions(): WhisperSession[] {
        return [];
    }

    getWhisperTarget(): WhisperTarget | undefined {
        return undefined;
    }

    setWhisperSessionInitializer(initializer: WhisperSessionInitializer | undefined) {
    }

    startWhisper(target: WhisperTarget): Promise<void> {
        return Promise.resolve(undefined);
    }

    dropWhisperSession(session: WhisperSession) {
    }

    stopWhisper() {
    }
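    /**
     * Reads the voice byte counters from the native server connection handle.
     * Note: this reaches into NativeServerConnection via an untyped cast (see the FIXME below).
     */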
    getConnectionStats(): Promise<ConnectionStatistics> {
        /* FIXME: This is iffy! */
        const stats = (this.connection as any as NativeServerConnection)["nativeHandle"]?.statistics();

        return Promise.resolve({
            bytesSend: stats?.voice_bytes_send ? stats?.voice_bytes_send : 0,
            bytesReceived: stats?.voice_bytes_received ? stats?.voice_bytes_received : 0
        });
    }

    getRetryTimestamp(): number | 0 {
        return Date.now();
    }
}
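/**
 * Wraps a NativeVoiceClient and adapts its playback state callbacks onto the shared
 * VoiceClient / VoicePlayerEvents interface.
 */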
class NativeVoiceClientWrapper implements VoiceClient {
    private readonly native: NativeVoiceClient;
    readonly events: Registry<VoicePlayerEvents>;
    private playerState: VoicePlayerState;

    constructor(native: NativeVoiceClient) {
        this.events = new Registry<VoicePlayerEvents>();
        this.native = native;
        this.playerState = VoicePlayerState.STOPPED;
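        /* Map the native PlayerState values onto the shared VoicePlayerState values. */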
        this.native.callback_state_changed = state => {
            switch (state) {
                case PlayerState.BUFFERING:
                    this.setState(VoicePlayerState.BUFFERING);
                    break;

                case PlayerState.PLAYING:
                    this.setState(VoicePlayerState.PLAYING);
                    break;

                case PlayerState.STOPPED:
                    this.setState(VoicePlayerState.STOPPED);
                    break;

                case PlayerState.STOPPING:
                    this.setState(VoicePlayerState.STOPPING);
                    break;
            }
        };

        this.resetLatencySettings();
    }

    destroy() {
        this.events.destroy();
    }

    abortReplay() {
        this.native.abort_replay();
    }

    flushBuffer() {
        this.native.get_stream().flush_buffer();
    }

    getClientId(): number {
        return this.native.client_id;
    }

    getState(): VoicePlayerState {
        return this.playerState;
    }

    private setState(state: VoicePlayerState) {
        if(this.playerState === state) {
            return;
        }

        const oldState = this.playerState;
        this.playerState = state;
        this.events.fire("notify_state_changed", { oldState: oldState, newState: state });
    }

    setVolume(volume: number) {
        this.native.set_volume(volume);
    }

    getVolume(): number {
        return this.native.get_volume();
    }
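    /**
     * Restores the default playback buffer latency settings.
     * The native stream API appears to expect seconds (0.080 / 0.5 here), while
     * VoicePlayerLatencySettings uses milliseconds, hence the /1000 and *1000
     * conversions in the setter/getter below.
     */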
    resetLatencySettings() {
        const stream = this.native.get_stream();
        stream.set_buffer_latency(0.080);
        stream.set_buffer_max_latency(0.5);
    }

    setLatencySettings(settings: VoicePlayerLatencySettings) {
        const stream = this.native.get_stream();
        stream.set_buffer_latency(settings.minBufferTime / 1000);
        stream.set_buffer_max_latency(settings.maxBufferTime / 1000);
    }

    getLatencySettings(): Readonly<VoicePlayerLatencySettings> {
        const stream = this.native.get_stream();

        return {
            maxBufferTime: stream.get_buffer_max_latency() * 1000,
            minBufferTime: stream.get_buffer_latency() * 1000
        };
    }
}