mirror of
https://github.com/saitohirga/WSJT-X.git
synced 2024-10-03 02:26:45 -04:00
3fcb73b107
the main GUI thread (thanks to G4WJS). 2. Also, for the record, some example code for using QAudioInput instead of PortAudio. This code is not presently active, and will need to be changed to accommodate the changes in #1, above. But the basic ideas are here... git-svn-id: svn+ssh://svn.code.sf.net/p/wsjt/wsjt/branches/wsjtx@3509 ab8295b8-cf94-4d9e-aec4-7959e3be5d79
187 lines
5.3 KiB
C++
187 lines
5.3 KiB
C++
#include "soundin.h"
|
|
#include <stdexcept>
|
|
|
|
#define FRAMES_PER_BUFFER 1024   // Frames PortAudio delivers per a2dCallback invocation

//#define NSMAX 1365
#define NSMAX 6827               // Max number of spectrum bins; must match the Fortran side
#define NTMAX 120                // Max T/R period in seconds (sizes the d2 sample buffer)
|
extern "C" {
#include <portaudio.h>

// Mirror of the Fortran common block "common/jt9com/...".  Field order,
// types, and array sizes must match the Fortran declaration exactly --
// do not reorder or resize without changing the Fortran code too.
extern struct {
float ss[184*NSMAX]; //This is "common/jt9com/..." in fortran
float savg[NSMAX];   //Average spectrum
// float c0[2*NTMAX*1500];
short int d2[NTMAX*12000]; //Raw 16-bit samples, 12000 Hz, up to NTMAX seconds
int nutc;                  //UTC as integer, HHMM
int ndiskdat;              //1 ==> data read from *.wav file
int ntrperiod;             //TR period (seconds)
int mousefqso;             //User-selected QSO freq (kHz)
int newdat;                //1 ==> new data, must do long FFT
int npts8;                 //npts in c0() array
int nfa;                   //Low decode limit (Hz)
int nfb;                   //High decode limit (Hz)
int ntol;                  //+/- decoding range around fQSO (Hz)
int kin;                   //Index of last sample written to d2
int nzhsym;                //Number of half-symbols available
int nsave;                 //NOTE(review): semantics not visible in this file
int nagain;                //Presumably 1 ==> re-decode request; confirm in decoder
int ndepth;                //Decoding depth setting
int ntxmode;               //Tx mode selector
int nmode;                 //Rx mode selector
char datetime[20];         //Date/time string
} jt9com_;
}
|
|
|
|
//--------------------------------------------------------------- a2dCallback
|
|
int a2dCallback( const void *inputBuffer, void * /* outputBuffer */,
|
|
unsigned long framesToProcess,
|
|
const PaStreamCallbackTimeInfo * /* timeInfo */,
|
|
PaStreamCallbackFlags statusFlags,
|
|
void *userData )
|
|
|
|
// This routine called by the PortAudio engine when samples are available.
|
|
// It may be called at interrupt level, so don't do anything
|
|
// that could mess up the system like calling malloc() or free().
|
|
|
|
{
|
|
SoundInput::CallbackData * udata = reinterpret_cast<SoundInput::CallbackData *>(userData);
|
|
int nbytes,k;
|
|
|
|
udata->ncall++;
|
|
if( (statusFlags&paInputOverflow) != 0) {
|
|
qDebug() << "Input Overflow in a2dCallback";
|
|
}
|
|
if(udata->bzero) { //Start of a new Rx sequence
|
|
udata->kin=0; //Reset buffer pointer
|
|
udata->bzero=false;
|
|
}
|
|
|
|
nbytes=2*framesToProcess; //Bytes per frame
|
|
k=udata->kin;
|
|
if(udata->monitoring) {
|
|
memcpy(&jt9com_.d2[k],inputBuffer,nbytes); //Copy all samples to d2
|
|
}
|
|
udata->kin += framesToProcess;
|
|
jt9com_.kin=udata->kin;
|
|
|
|
return paContinue;
|
|
}
|
|
|
|
// Construct an idle sound-input object.  No audio is captured until
// start() is called; defaults are a 60 s T/R period and nsps = 6912.
SoundInput::SoundInput()
  : m_inStream(0)
  , m_dataSinkBusy(false)
  , m_TRperiod(60)
  , m_nsps(6912)
  , m_monitoring(false)
  , m_intervalTimer(this)
{
  // intervalNotify() runs on each timer tick once start() arms the timer.
  connect(&m_intervalTimer, &QTimer::timeout, this, &SoundInput::intervalNotify);
}
|
|
|
|
// Open PortAudio input device `device` (mono, 16-bit, 12000 Hz) and start
// capturing.  Any previously running stream is stopped first.  Emits
// error() and returns early if the stream cannot be opened or started.
void SoundInput::start(qint32 device)
{
  stop();

//---------------------------------------------------- Soundcard Setup
  PaError paerr;
  PaStreamParameters inParam;

  m_callbackData.kin=0;                    //Buffer pointer
  m_callbackData.ncall=0;                  //Number of callbacks
  m_callbackData.bzero=false;              //Flag to request reset of kin
  m_callbackData.monitoring=m_monitoring;

  inParam.device=device;                   //### Input Device Number ###
  inParam.channelCount=1;                  //Number of analog channels
  inParam.sampleFormat=paInt16;            //Get i*2 from Portaudio
  inParam.suggestedLatency=0.05;
  inParam.hostApiSpecificStreamInfo=NULL;

  paerr=Pa_IsFormatSupported(&inParam,NULL,12000.0);
  if(paerr<0) {
    emit error("PortAudio says requested soundcard format not supported.");
//  return;
  }

  paerr=Pa_OpenStream(&m_inStream,         //Input stream
                      &inParam,            //Input parameters
                      NULL,                //No output parameters
                      12000.0,             //Sample rate
                      FRAMES_PER_BUFFER,   //Frames per buffer
//                    paClipOff+paDitherOff, //No clipping or dithering
                      paClipOff,           //No clipping
                      a2dCallback,         //Input callback routine
                      &m_callbackData);    //userdata

  // Bug fix: the open result was previously ignored, so a failed open fell
  // through to Pa_StartStream() on an invalid stream handle.
  if(paerr<0) {
    m_inStream=0;
    emit error("Failed to open audio input stream.");
    return;
  }

  paerr=Pa_StartStream(m_inStream);
  if(paerr<0) {
    emit error("Failed to start audio input stream.");
    return;
  }
  m_ntr0 = 99;                             // initial value higher than any expected
  m_nBusy = 0;
  m_intervalTimer.start(100);              // poll capture progress every 100 ms
  m_ms0 = QDateTime::currentMSecsSinceEpoch();
  m_nsps0 = 0;
}
|
|
|
|
// Timer slot, fired every 100 ms while capture is running.  Propagates the
// monitoring flag to the audio callback, estimates the soundcard's actual
// sample-rate ratio, requests a buffer reset at each T/R period boundary,
// and emits readyForFFT() each time another half-symbol (m_nsps/2 samples)
// of new audio has accumulated.
void SoundInput::intervalNotify()
{
  m_callbackData.monitoring=m_monitoring;
  qint64 ms = QDateTime::currentMSecsSinceEpoch();
  m_SamFacIn=1.0;
  // Once enough callbacks have run, measure actual vs nominal sample rate;
  // the -50 ms term presumably compensates for stream start latency -- TODO confirm.
  if(m_callbackData.ncall>100) {
    m_SamFacIn=m_callbackData.ncall*FRAMES_PER_BUFFER*1000.0/(12000.0*(ms-m_ms0-50));
  }
  ms=ms % 86400000;                 // milliseconds since UTC midnight
  int nsec = ms/1000;               // Time according to this computer
  int ntr = nsec % m_TRperiod;      // seconds into the current T/R period

// Reset buffer pointer and symbol number at start of minute
  if(ntr < m_ntr0 or !m_monitoring or m_nsps!=m_nsps0) {
    m_nstep0=0;
    m_nsps0=m_nsps;
    m_callbackData.bzero=true;      // ask the audio callback to rewind kin
  }
  int k=m_callbackData.kin;
  if(m_monitoring) {
    int kstep=m_nsps/2;             // samples per half-symbol step
//  m_step=k/kstep;
    m_step=(k-1)/kstep;
    if(m_step != m_nstep0) {        // a new half-symbol step has completed
      if(m_dataSinkBusy) {
        m_nBusy++;                  // sink still busy: count the skipped notification
      } else {
//      m_dataSinkBusy=true;
//      emit readyForFFT(k);        //Signal to compute new FFTs
        emit readyForFFT(k-1);      //Signal to compute new FFTs
      }
      m_nstep0=m_step;
    }
  }
  m_ntr0=ntr;                       // remember where we were in the period
}
|
|
|
|
// Destructor: release the PortAudio stream if one is still open.
// Consistency fix: stop the stream before closing it, matching the shutdown
// sequence used in stop(); closing an active stream otherwise discards
// pending buffers as if Pa_AbortStream() had been called.
SoundInput::~SoundInput()
{
  if (m_inStream)
    {
      Pa_StopStream(m_inStream);
      Pa_CloseStream(m_inStream);
      m_inStream = 0;
    }
}
|
|
|
|
void SoundInput::stop()
|
|
{
|
|
m_intervalTimer.stop();
|
|
if (m_inStream)
|
|
{
|
|
Pa_StopStream(m_inStream);
|
|
Pa_CloseStream(m_inStream), m_inStream = 0;
|
|
}
|
|
}
|
|
|
|
void SoundInput::setMonitoring(bool b)
|
|
{
|
|
m_monitoring = b;
|
|
}
|