// WaveAudioSource.cpp: implementation of the WaveAudioSource class.
//
//////////////////////////////////////////////////////////////////////

#include "AudioSample.h"
#include "AudioSampleManager.h"
#include "WaveAudioSource.h"

using namespace std;

//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

WaveAudioSource::WaveAudioSource()
{
    char subFacilityName[100];
    sprintf(subFacilityName, "WaveAudioSource:%x", this);
    tracer.SetSubFacilityName(subFacilityName);
    SetTraceLevel();
    tracer.tracef(EE, "Constructor begin\n");
    numBuffers = WAVESOURCE_NUM_WAVEHDR;
    bufferSize = WAVESOURCE_DEFAULT_WAVEHDR_SIZE;
    InitializeCriticalSection(&waveInMutex);
    InitializeCriticalSection(&bufferListMutex);
    InitializeCriticalSection(&filterMutex);
    dataEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    bEnableAddBuffer = false;
    hWaveIn = NULL;
    waveinDeviceID = WAVE_MAPPER;
    buffersToAdd.clear();
    bRunning = false;
    needToSetVolume = false;
    micVolume = 0;

    // set default format
    SetFormat(WaveFormat_PCM_16_8_1);
}

WaveAudioSource::~WaveAudioSource()
{
    tracer.tracef(EE, "Destructor begin\n");
    EnterCriticalSection(&filterMutex);
    tracer.tracef(ARB, "WaveAudioSource~ : entered filterMutex\n");
    LeaveCriticalSection(&filterMutex);
    tracer.tracef(ARB, "WaveAudioSource~ : left filterMutex\n");
    DeleteCriticalSection(&waveInMutex);
    DeleteCriticalSection(&bufferListMutex);
    DeleteCriticalSection(&filterMutex);
    CloseHandle(dataEvent);
    tracer.tracef(EE, "Destructor end\n");
}

int
WaveAudioSource::SetTraceLevel()
{
    long SystemMask = 0;
    if ((SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "AllComponents", 0x0)) == 0)
    {
        SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "WaveAudioSource", 0x100000);
    }
    tracer.SetSystemMask(SystemMask);
    return 0;
}

int
WaveAudioSource::SetFormat(int waveFormatNumber)
{
    tracer.tracef(EE, "SetFormat %d : bRunning = %d\n", waveFormatNumber, bRunning);
    if (!bRunning)
    {
        format = WaveFormat::GetWaveFormat(waveFormatNumber);
    }
    tracer.tracef(EE, "~SetFormat\n");
    return 0;
}

int
WaveAudioSource::SetVolume(unsigned long vvolume)
{
    if (bRunning)
    {
        MMRESULT rc;                // Return code.
        HMIXER hMixer;              // Mixer handle used in mixer API calls.
        MIXERCONTROL mxc;           // Holds the mixer control data.
        MIXERLINE mxl;              // Holds the mixer line data.
        MIXERLINECONTROLS mxlc;     // Obtains the mixer control.
        bool volumeChanged = false;

        // Open the mixer that belongs to our open waveIn handle
        // (MIXER_OBJECTF_HWAVEIN). With a single sound card/mixer this is
        // the only one; with multiple sound cards/mixers the mixer device
        // IDs would be 0, 1, 2, and so on.
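        // Illustrative sketch (not compiled): if we ever needed to pick a
        // mixer by device ID instead of by waveIn handle, enumeration would
        // look roughly like this. The loop below is an assumption for
        // illustration only; this class opens the mixer via its waveIn handle.
#if 0
        UINT numMixers = mixerGetNumDevs();
        for (UINT id = 0; id < numMixers; id++)
        {
            MIXERCAPS caps;
            if (mixerGetDevCaps(id, &caps, sizeof(caps)) == MMSYSERR_NOERROR)
            {
                // caps.szPname holds the mixer name; the chosen id would be
                // passed to mixerOpen with MIXER_OBJECTF_MIXER.
            }
        }
#endif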
        rc = mixerOpen(&hMixer, (UINT)hWaveIn, 0, 0, MIXER_OBJECTF_HWAVEIN);
        if (MMSYSERR_NOERROR == rc)
        {
            tracer.tracef(DET, "SetVolume : mixerOpen succeeded\n");
            do
            {
                // Initialize MIXERLINE structure.
                ZeroMemory(&mxl, sizeof(mxl));
                mxl.cbStruct = sizeof(mxl);

                // Specify the line you want to get. You are getting the input line
                // here. If you want to get the output line, you need to use
                // MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT.
                mxl.dwComponentType = MIXERLINE_COMPONENTTYPE_DST_WAVEIN;

                rc = mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_GETLINEINFOF_COMPONENTTYPE);
                if (MMSYSERR_NOERROR != rc)
                {
                    tracer.tracef(ERR, "SetVolume : e-mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_GETLINEINFOF_COMPONENTTYPE) : %d\n", rc);
                    break;
                }

                // Now find the microphone input (the ADC source) for that line.
                int numConnections = mxl.cConnections;
                tracer.tracef(DET, "SetVolume : mixerGetLineInfo succeeded. numConnections = %d\n", numConnections);
                DWORD destinationLineID = mxl.dwDestination;
                for (int j=0; j<numConnections; j++)
                {
                    mxl.cbStruct = sizeof(MIXERLINE);
                    mxl.dwDestination = destinationLineID;
                    mxl.dwSource = j;

                    rc = mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_GETLINEINFOF_SOURCE);
                    if (rc == MMSYSERR_NOERROR)
                    {
                        tracer.tracef(DET, "SetVolume : mixerGetLineInfo succeeded. dwComponentType %d = 0x%x\n", j, mxl.dwComponentType);
                        if (mxl.dwComponentType == MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE ||
                            mxl.dwComponentType == MIXERLINE_COMPONENTTYPE_SRC_UNDEFINED)
                        {
                            // Get the volume control.
                            ZeroMemory(&mxlc, sizeof(mxlc));
                            mxlc.cbStruct = sizeof(mxlc);
                            mxlc.dwLineID = mxl.dwLineID;
                            mxlc.dwControlType = MIXERCONTROL_CONTROLTYPE_VOLUME;
                            mxlc.cControls = 1;
                            mxlc.cbmxctrl = sizeof(mxc);
                            mxlc.pamxctrl = &mxc;
                            ZeroMemory(&mxc, sizeof(mxc));
                            mxc.cbStruct = sizeof(mxc);
                            rc = mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerGetLineControls((HMIXEROBJ)hMixer,&mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE) : %d. Could not get MIXERCONTROL_CONTROLTYPE_VOLUME\n", rc);
                                break;
                            }

                            // After successfully getting the volume control, its range
                            // is given by mxc.Bounds.lMinimum to mxc.Bounds.lMaximum.
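                            // Illustrative sketch (not compiled): the code below assumes
                            // a 0..0xFFFF range when scaling the percentage. A sketch that
                            // honors the driver-reported bounds instead would look like
                            // this (hypothetical local names, shown for illustration only):
#if 0
                            LONG range = mxc.Bounds.lMaximum - mxc.Bounds.lMinimum;
                            LONG scaledValue = mxc.Bounds.lMinimum + (range * (LONG)vvolume) / 100;  // 0..100 percent -> control units
#endif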

                            MIXERCONTROLDETAILS mxcd;               // Describes which control values to get/set.
                            MIXERCONTROLDETAILS_UNSIGNED volStruct; // Receives/sets the volume value.
                            MIXERCONTROLDETAILS_BOOLEAN muteStruct; // Receives/sets the mute setting.
                            long volume;                            // Holds the previous volume value (read back; not otherwise used).

                            // Initialize the MIXERCONTROLDETAILS structure
                            ZeroMemory(&mxcd, sizeof(mxcd));
                            mxcd.cbStruct = sizeof(mxcd);
                            mxcd.cbDetails = sizeof(volStruct);
                            mxcd.dwControlID = mxc.dwControlID;
                            mxcd.paDetails = &volStruct;
                            mxcd.cChannels = 1;

                            // Get the current value of the volume control.
                            rc = mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE) : %d\n", rc);
                                break;
                            }
                            volume = volStruct.dwValue;

                            volStruct.dwValue = (0xffff * vvolume) / 100;
                            rc = mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE) : %d\n", rc);
                                break;
                            }

                            volumeChanged = true;

                            // Get the mute control.
                            ZeroMemory(&mxlc, sizeof(mxlc));
                            mxlc.cbStruct = sizeof(mxlc);
                            mxlc.dwLineID = mxl.dwLineID;
                            mxlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MUTE;
                            mxlc.cControls = 1;
                            mxlc.cbmxctrl = sizeof(mxc);
                            mxlc.pamxctrl = &mxc;
                            ZeroMemory(&mxc, sizeof(mxc));
                            mxc.cbStruct = sizeof(mxc);
                            rc = mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerGetLineControls((HMIXEROBJ)hMixer,&mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE) : %d. Could not get MIXERCONTROL_CONTROLTYPE_MUTE\n", rc);
                                if (mxl.dwComponentType != MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE)
                                {
                                    continue;
                                }
                                else
                                {
                                    break;
                                }
                            }

                            ZeroMemory(&mxcd, sizeof(mxcd));
                            mxcd.cbStruct = sizeof(mxcd);
                            mxcd.cbDetails = sizeof(muteStruct);
                            mxcd.dwControlID = mxc.dwControlID;
                            mxcd.paDetails = &muteStruct;
                            mxcd.cChannels = 1;

                            // Get the current value of the mute control.
                            rc = mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE) : %d\n", rc);
                                break;
                            }
                            muteStruct.fValue = vvolume == 0 ? 1 : 0;
                            rc = mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE);
                            if (MMSYSERR_NOERROR != rc)
                            {
                                tracer.tracef(ERR, "SetVolume : e-mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE) : %d\n", rc);
                                break;
                            }
                            break;
                        }
                    }
                }
            } while (false);
            if (volumeChanged == false)
            {
                do
                {
                    // Initialize MIXERLINE structure.
                    ZeroMemory(&mxl, sizeof(mxl));
                    mxl.cbStruct = sizeof(mxl);

                    // Specify the line you want to get. You are getting the input line
                    // here. If you want to get the output line, you need to use
                    // MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT.
                    mxl.dwComponentType = MIXERLINE_COMPONENTTYPE_DST_WAVEIN;

                    rc = mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_GETLINEINFOF_COMPONENTTYPE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerGetLineInfo((HMIXEROBJ)hMixer, &mxl, MIXER_GETLINEINFOF_COMPONENTTYPE) : %d\n", rc);
                        break;
                    }

                    // Read the connection info (used for tracing only here); in this
                    // fallback we adjust the wave-in destination line directly.
                    int numConnections = mxl.cConnections;
                    tracer.tracef(DET, "SetVolume : mixerGetLineInfo succeeded. numConnections = %d\n", numConnections);
                    DWORD destinationLineID = mxl.dwDestination;

                    // Get the volume control on the destination (wave-in) line.
                    ZeroMemory(&mxlc, sizeof(mxlc));
                    mxlc.cbStruct = sizeof(mxlc);
                    mxlc.dwLineID = mxl.dwLineID;
                    mxlc.dwControlType = MIXERCONTROL_CONTROLTYPE_VOLUME;
                    mxlc.cControls = 1;
                    mxlc.cbmxctrl = sizeof(mxc);
                    mxlc.pamxctrl = &mxc;
                    ZeroMemory(&mxc, sizeof(mxc));
                    mxc.cbStruct = sizeof(mxc);
                    rc = mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerGetLineControls((HMIXEROBJ)hMixer,&mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE) : %d. Could not get MIXERCONTROL_CONTROLTYPE_VOLUME\n", rc);
                        if (mxl.dwComponentType != MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE)
                        {
                            continue; // no enclosing for loop here, so this exits the do/while just like break
                        }
                        else
                        {
                            break;
                        }
                    }
                    MIXERCONTROLDETAILS mxcd;               // Describes which control values to get/set.
                    MIXERCONTROLDETAILS_UNSIGNED volStruct; // Receives/sets the volume value.
                    MIXERCONTROLDETAILS_BOOLEAN muteStruct; // Receives/sets the mute setting.
                    long volume;                            // Holds the previous volume value (read back; not otherwise used).

                    // Initialize the MIXERCONTROLDETAILS structure
                    ZeroMemory(&mxcd, sizeof(mxcd));
                    mxcd.cbStruct = sizeof(mxcd);
                    mxcd.cbDetails = sizeof(volStruct);
                    mxcd.dwControlID = mxc.dwControlID;
                    mxcd.paDetails = &volStruct;
                    mxcd.cChannels = 1;

                    // Get the current value of the volume control.
                    rc = mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE) : %d\n", rc);
                        break;
                    }
                    volume = volStruct.dwValue;

                    volStruct.dwValue = (0xffff * vvolume) / 100;
                    rc = mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE) : %d\n", rc);
                        break;
                    }
                    volumeChanged = true;

                    // Get the mute control.
                    ZeroMemory(&mxlc, sizeof(mxlc));
                    mxlc.cbStruct = sizeof(mxlc);
                    mxlc.dwLineID = mxl.dwLineID;
                    mxlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MUTE;
                    mxlc.cControls = 1;
                    mxlc.cbmxctrl = sizeof(mxc);
                    mxlc.pamxctrl = &mxc;
                    ZeroMemory(&mxc, sizeof(mxc));
                    mxc.cbStruct = sizeof(mxc);
                    rc = mixerGetLineControls((HMIXEROBJ)hMixer, &mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerGetLineControls((HMIXEROBJ)hMixer,&mxlc, MIXER_GETLINECONTROLSF_ONEBYTYPE) : %d. Could not get MIXERCONTROL_CONTROLTYPE_MUTE\n", rc);
                        if (mxl.dwComponentType != MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE)
                        {
                            continue; // no enclosing for loop here, so this exits the do/while just like break
                        }
                        else
                        {
                            break;
                        }
                    }

                    ZeroMemory(&mxcd, sizeof(mxcd));
                    mxcd.cbStruct = sizeof(mxcd);
                    mxcd.cbDetails = sizeof(muteStruct);
                    mxcd.dwControlID = mxc.dwControlID;
                    mxcd.paDetails = &muteStruct;
                    mxcd.cChannels = 1;

                    // Get the current value of the mute control.
                    rc = mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerGetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_GETCONTROLDETAILSF_VALUE) : %d\n", rc);
                        break;
                    }
                    muteStruct.fValue = vvolume == 0 ? 1 : 0;
                    rc = mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE);
                    if (MMSYSERR_NOERROR != rc)
                    {
                        tracer.tracef(ERR, "SetVolume : e-mixerSetControlDetails((HMIXEROBJ)hMixer, &mxcd, MIXER_SETCONTROLDETAILSF_VALUE) : %d\n", rc);
                        break;
                    }
                    break;
                } while (false);
            }
            mixerClose(hMixer);
        }
        else
        {
            tracer.tracef(ERR, "SetVolume : e-mixerOpen : %d\n", rc);
        }
    }
    else
    {
        needToSetVolume = true;
    }
    micVolume = vvolume;
    return 0;
}

int
WaveAudioSource::SetDeviceID(unsigned int deviceID)
{
    waveinDeviceID = deviceID;
    return 0;
}

int
WaveAudioSource::SetBufferSize(int size)
{
    tracer.tracef(EE, "SetBufferSize %d : bRunning = %d\n", size, bRunning);
    if (!bRunning)
    {
        bufferSize = size;
    }
    tracer.tracef(EE, "~SetBufferSize\n");
    return 0;
}

int
WaveAudioSource::SetNumBuffers(int num)
{
    tracer.tracef(EE, "SetNumBuffers %d : bRunning = %d\n", num, bRunning);
    if (!bRunning)
    {
        numBuffers = num;
    }
    tracer.tracef(EE, "~SetNumBuffers\n");
    return 0;
}

int
WaveAudioSource::BeginAudioCapture()
{
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "BeginAudioCapture\n");
    HRESULT result = NOERROR;
    tracer.tracef(SDI_LEVEL_DETAILED, "BeginAudioCapture : Entering bufferListMutex\n");
    EnterCriticalSection(&bufferListMutex);
    buffersToAdd.clear();
    LeaveCriticalSection(&bufferListMutex);
    tracer.tracef(SDI_LEVEL_DETAILED, "BeginAudioCapture : Left bufferListMutex\n");

    audioSampleManager = AudioSampleManager::GetInstance();

    WaveFormat::TraceFormat(&tracer, ARB, format);
    tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : waveInOpen\n");
    MMRESULT mr = waveInOpen(&hWaveIn, waveinDeviceID, &format, (DWORD)WaveInCallback, DWORD(this), CALLBACK_FUNCTION);
    tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : ~waveInOpen\n");
    if (mr != NOERROR)
    {
        tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : e-waveInOpen : 0x%x\n", mr);
        WaveFormat::TraceFormat(&tracer, ERR, format);
        Sleep(1000);
        tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : waveInOpen : 2nd try\n");
        mr = waveInOpen(&hWaveIn, waveinDeviceID, &format, (DWORD)WaveInCallback, DWORD(this), CALLBACK_FUNCTION);
        tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : ~waveInOpen : returned 0x%x\n", mr);
        if (mr != NOERROR)
        {
            tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : e-waveInOpen : 0x%x\n", mr);
            WaveFormat::TraceFormat(&tracer, ERR, format);
            result = -10;
        }
    }
    if (result == 0)
    {
        tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : Entering waveInMutex\n");
        EnterCriticalSection(&waveInMutex);
        bEnableAddBuffer = true;
        for (int i=0; i<numBuffers; i++) {
            waveHdr[i].lpData = dataBuffer[i];
            waveHdr[i].dwBufferLength = bufferSize;
            waveHdr[i].dwFlags = 0;
            waveHdr[i].dwUser = i;

            tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : waveInPrepareHeader\n");
            result = waveInPrepareHeader(hWaveIn, &(waveHdr[i]), sizeof(waveHdr[i]));
            tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : ~waveInPrepareHeader\n");
            if (result == MMSYSERR_NOERROR)
            {
                AddBuffer(i);
            }
            else
            {
                tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : e-waveInPrepareHeader : 0x%x\n", result);
            }
        }
        tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : waveInStart\n");
        result = waveInStart(hWaveIn);
        tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : ~waveInStart\n");
        if (result != MMSYSERR_NOERROR)
        {
            tracer.tracef(SDI_LEVEL_ERROR, "BeginAudioCapture : e-waveInStart : 0x%x\n", result);
            result = -20;
        }
        LeaveCriticalSection(&waveInMutex);
        tracer.tracef(SDI_LEVEL_ARBITRARY, "BeginAudioCapture : Left waveInMutex\n");
    }
    if (result == 0)
    {
        tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~BeginAudioCapture\n");
    }
    else
    {
        tracer.tracef(ERR, "~BeginAudioCapture : returning %d\n", result);
    }

    return result;
}

int
WaveAudioSource::EndAudioCapture()
{
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "EndAudioCapture\n");
    tracer.tracef(SDI_LEVEL_DETAILED, "EndAudioCapture : entering waveInMutex\n");
    EnterCriticalSection(&waveInMutex);
    bEnableAddBuffer = false;
    HRESULT result = NOERROR;
    tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : waveInReset\n");
    result = waveInReset(hWaveIn);
    tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : ~waveInReset\n");
    if (result != MMSYSERR_NOERROR)
    {
        tracer.tracef(SDI_LEVEL_ERROR, "EndAudioCapture : e-waveInReset : 0x%x\n", result);
    }
    else
    {
        LeaveCriticalSection(&waveInMutex);
        tracer.tracef(SDI_LEVEL_DETAILED, "EndAudioCapture : left waveInMutex\n");
        while (true)
        {
            tracer.tracef(DET, "EndAudioCapture : entering bufferListMutex\n");
            EnterCriticalSection(&bufferListMutex);
            tracer.tracef(DET, "EndAudioCapture : buffersToAdd.size = %d, numBuffers = %d\n", buffersToAdd.size(), numBuffers);
            if (buffersToAdd.size() < numBuffers)
            {
                LeaveCriticalSection(&bufferListMutex);
                tracer.tracef(DET, "EndAudioCapture : left bufferListMutex. Waiting for dataEvent\n");
                WaitForSingleObject(dataEvent, INFINITE);
                tracer.tracef(DET, "EndAudioCapture : got dataEvent\n");
                continue;
            }
            else
            {
                LeaveCriticalSection(&bufferListMutex);
                tracer.tracef(DET, "EndAudioCapture : left bufferListMutex\n");
                break;
            }
        }
        tracer.tracef(DET, "EndAudioCapture : entering waveInMutex\n");
        EnterCriticalSection(&waveInMutex);
        for (int i=0; i<numBuffers; i++) {
            tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : waveInUnprepareHeader\n");
            result = waveInUnprepareHeader(hWaveIn, &(waveHdr[i]), sizeof(waveHdr[i]));
            tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : ~waveInUnprepareHeader\n");
            if (result != MMSYSERR_NOERROR)
            {
                tracer.tracef(SDI_LEVEL_ERROR, "EndAudioCapture : e-waveInUnprepareHeader : 0x%x\n", result);
            }
        }
        tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : waveInClose\n");
        result = waveInClose(hWaveIn);
        tracer.tracef(SDI_LEVEL_ARBITRARY, "EndAudioCapture : ~waveInClose\n");
        if (result != MMSYSERR_NOERROR)
        {
            tracer.tracef(SDI_LEVEL_ERROR, "EndAudioCapture : e-waveInClose : 0x%x\n", result);
        }
        hWaveIn = NULL;
    }
    LeaveCriticalSection(&waveInMutex);
    tracer.tracef(SDI_LEVEL_DETAILED, "EndAudioCapture : Left waveInMutex\n");
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~EndAudioCapture\n");
    return result;
}

int
WaveAudioSource::ResetAudioCapture()
{
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "ResetAudioCapture\n");
    HRESULT result = NOERROR;
    result = EndAudioCapture();
    if (result == MMSYSERR_NOERROR)
    {
        result = BeginAudioCapture();
    }
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~ResetAudioCapture\n");
    return result;
}

int
WaveAudioSource::AddBuffer(int i)
{
    tracer.tracef(SDI_LEVEL_DETAILED, "AddBuffer %d\n", i);
    HRESULT result = NOERROR;
    tracer.tracef(SDI_LEVEL_DETAILED, "AddBuffer : Entering waveInMutex\n");
    EnterCriticalSection(&waveInMutex);
    bool bEnabled = bEnableAddBuffer;
    if (bEnabled)
    {
        tracer.tracef(DET, "AddBuffer : waveInAddBuffer\n");
        result = waveInAddBuffer(hWaveIn, &(waveHdr[i]), sizeof(waveHdr[i]));
        tracer.tracef(DET, "AddBuffer : ~waveInAddBuffer\n");
        LeaveCriticalSection(&waveInMutex);
        tracer.tracef(DET, "AddBuffer : Left waveInMutex\n");
        if (result != MMSYSERR_NOERROR)
        {
            tracer.tracef(SDI_LEVEL_ERROR, "AddBuffer : e-waveInAddBuffer : 0x%x\n", result);
            result = -10;
        }
    }
    else
    {
        LeaveCriticalSection(&waveInMutex);
        tracer.tracef(SDI_LEVEL_DETAILED, "AddBuffer : !bEnabled. Left waveInMutex. Putting buffer back in buffersToAdd\n");
        tracer.tracef(SDI_LEVEL_DETAILED, "AddBuffer : Entering bufferListMutex\n");
        EnterCriticalSection(&bufferListMutex);
        buffersToAdd.push_back(&(waveHdr[i]));
        LeaveCriticalSection(&bufferListMutex);
        SetEvent(dataEvent);
        tracer.tracef(SDI_LEVEL_DETAILED, "AddBuffer : Left bufferListMutex\n");
    }
    tracer.tracef(SDI_LEVEL_DETAILED, "~AddBuffer\n");
    return result;
}

// To avoid problems with wave calls not returning, we must GUARANTEE that the callback function
// below returns regardless of the state of any other function. If the callback grabs a mutex that
// is also entered by other functions, none of those functions may hold that mutex while making a
// system call (such as a wave API call), since such calls can block until this callback returns.
// That is the potential deadlock.
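// Concretely, the discipline used here: WaveInCallback only ever takes bufferListMutex (for a
// quick push_back) and signals dataEvent; it never takes waveInMutex. Code that calls wave APIs
// (AddBuffer, BeginAudioCapture, EndAudioCapture) holds waveInMutex around those calls, but only
// holds bufferListMutex for short list operations, never across a wave API call.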
void CALLBACK
WaveAudioSource::WaveInCallback(HWAVEIN hWaveIn, UINT uMsg, DWORD dwInstance, DWORD dwParam1, DWORD dwParam2) {
    WAVEHDR *wHdr;
    WaveAudioSource *waveAudioSource;
    waveAudioSource = (WaveAudioSource *)dwInstance;
    (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback\n");
    switch(uMsg) {
    case WIM_OPEN:
        (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback : WIM_OPEN\n");
        break;
    case WIM_CLOSE:
        (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback : WIM_CLOSE\n");
        SetEvent(waveAudioSource->dataEvent);
        break;
    case WIM_DATA:
        wHdr = (WAVEHDR *)dwParam1;
        (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback : WIM_DATA %d\n", wHdr->dwUser);
        (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback : Entering bufferListMutex\n");
        EnterCriticalSection(&(waveAudioSource->bufferListMutex));
        (waveAudioSource->buffersToAdd).push_back(wHdr);
        LeaveCriticalSection(&(waveAudioSource->bufferListMutex));
        SetEvent(waveAudioSource->dataEvent); // signal after leaving the critical section; GenerateData waits on dataEvent and re-checks the list under bufferListMutex
        (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "WaveInCallback : Left bufferListMutex\n");
        break;
    default:
        break;
    }
    (waveAudioSource->tracer).tracef(SDI_LEVEL_DETAILED, "~WaveInCallback\n");
}

int
WaveAudioSource::GenerateData(AudioSample **ppAudioSample)
{
    tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData\n");
    HRESULT result = NOERROR;
    *ppAudioSample = NULL;
    bool fireDataEvent = false;

    while (true)
    {
        tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : Entering waveInMutex\n");
        EnterCriticalSection(&waveInMutex);
        bool bEnabled = bEnableAddBuffer;
        LeaveCriticalSection(&waveInMutex);
        tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : Left waveInMutex\n");
        if (!bEnabled)
        {
            if (fireDataEvent)
            {
                SetEvent(dataEvent);
            }
            break;
        }
        tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : Entering bufferListMutex\n");
        EnterCriticalSection(&bufferListMutex);
        tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : buffersToAdd.size = %d\n", buffersToAdd.size());
        if (buffersToAdd.size() <= 0)
        {
            LeaveCriticalSection(&bufferListMutex);
            tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : Left bufferListMutex. Waiting for dataEvent\n");
            WaitForSingleObject(dataEvent, INFINITE);
            tracer.tracef(DET, "GenerateData : Got dataEvent\n");
            fireDataEvent = true;
            continue;
        }
        else
        {
            WAVEHDR *wHdr = buffersToAdd.front();
            buffersToAdd.pop_front();
            LeaveCriticalSection(&bufferListMutex);
            tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : Left bufferListMutex\n");
            AudioSample *audioSample;
            audioSampleManager->GetAudioSample(&audioSample, this);
            *ppAudioSample = audioSample;
            audioSample->SetFormat(format);
            int bytesToFill = min(wHdr->dwBytesRecorded, audioSample->BufferSize());
            audioSample->SetDataSize(bytesToFill);

            // check to see if the audio is loud enough
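            // Any sample with |value| > 4 counts as signal; for 16-bit samples that threshold is
            // roughly -78 dBFS, i.e. barely above the quietest possible noise floor. If no such
            // sample is found (or the mic volume is 0), the buffer is treated as silence below.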
            int bytesPerSample = format.wBitsPerSample / 8;
            int numSamples = bytesToFill / bytesPerSample;
            double total = 0;
            bool canEncode = false;
            for (int i=0; i<bytesToFill; i+=bytesPerSample)
            {
                double sampleValue;
                switch(bytesPerSample)
                {
                case 1:
                    sampleValue = *((char *)((char *)(wHdr->lpData) + i));
                    break;
                case 2:
                    sampleValue = *((short *)((char *)(wHdr->lpData) + i));
                    break;
                case 4:
                    sampleValue = *((int *)((char *)(wHdr->lpData) + i));
                    break;
                default:
                    sampleValue = 0; // unsupported sample size; treat as silence
                    break;
                }
                if (sampleValue > 4 || sampleValue < -4) // any sample louder than +/-4 counts as signal
                {
                    canEncode = true;
                    break;
                }
            }

            if (micVolume > 0 && canEncode)
            {
                tracer.tracef(SDI_LEVEL_DETAILED, "GenerateData : memcpy(0x%x, 0x%x, %d)\n", audioSample->Data(), wHdr->lpData, bytesToFill);
                memcpy(audioSample->Data(), wHdr->lpData, bytesToFill);
            }
            else
            {
                audioSample->SetSilenceDuration(audioSample->GetDuration());
                audioSample->SetDataSize(0);
            }
            AddBuffer(wHdr->dwUser);
            break;
        }
    }
    tracer.tracef(SDI_LEVEL_DETAILED, "~GenerateData\n");
    return 0;
}

int
WaveAudioSource::SourceStarted()
{
    int result(0);
    int returnCode(0);
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "SourceStarted\n");
    EnterCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceStarted : entered filterMutex\n");
    do
    {
        if (bRunning)
        {
            tracer.tracef(ERR, "SourceStarted : e-error : already running\n");
            result = -10;
            break;
        }
        returnCode = BeginAudioCapture();
        if (returnCode != 0)
        {
            tracer.tracef(ERR, "SourceStarted : e-BeginAudioCapture : %d 0x%x\n", returnCode, returnCode);
            result = -20;
            break;
        }
        if (needToSetVolume)
        {
            SetVolume(micVolume);
            needToSetVolume = false;
        }
        bRunning = true;
    }
    while (false);

    LeaveCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceStarted : left filterMutex\n");
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~SourceStarted : returning %d\n", result);
    return result;
}

int
WaveAudioSource::SourceStopped()
{
    int result(0);
    int returnCode(0);
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "SourceStopped\n");
    EnterCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceStopped : entered filterMutex\n");
    do
    {
        if (!bRunning)
        {
            tracer.tracef(ERR, "SourceStopped : e-error : not running\n");
            result = -10;
            break;
        }
        returnCode = EndAudioCapture();
        if (returnCode != 0)
        {
            tracer.tracef(ERR, "SourceStopped : e-EndAudioCapture : %d 0x%x\n", returnCode, returnCode);
            result = -20;
            break;
        }
        needToSetVolume = false;
        bRunning = false;
    }
    while (false);
    LeaveCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceStopped : left filterMutex\n");
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~SourceStopped : returning %d\n", result);
    return result;
}

int
WaveAudioSource::SourceThreadStarted(HANDLE sourceThreadHandle, DWORD sourceThreadID)
{
    int result(0);
    int returnCode(0);
    SetTraceLevel();
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "SourceThreadStarted\n");
    EnterCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceThreadStarted : entered filterMutex\n");
    do
    {
        returnCode = SetThreadPriority(sourceThreadHandle, THREAD_PRIORITY_TIME_CRITICAL);
        if (returnCode == 0)
        {
            returnCode = GetLastError();
            tracer.tracef(ERR, "SourceThreadStarted : e-SetThreadPriority : %d 0x%x\n", returnCode, returnCode);
        }
        returnCode = BeginAudioCapture();
        if (returnCode != 0)
        {
            tracer.tracef(ERR, "SourceThreadStarted : e-BeginAudioCapture : %d 0x%x\n", returnCode, returnCode);
            result = -10;
            break;
        }
        if (needToSetVolume)
        {
            SetVolume(micVolume);
            needToSetVolume = false;
        }
        bRunning = true;
    }
    while (false);
    LeaveCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceThreadStarted : left filterMutex\n");
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~SourceThreadStarted : returning %d\n", result);
    return result;
}

int
WaveAudioSource::SourceThreadStopped(HANDLE sourceThreadHandle, DWORD sourceThreadID)
{
    int result(0);
    int returnCode(0);
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "SourceThreadStopped\n");
    EnterCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceThreadStopped : entered filterMutex\n");
    do
    {
        if (!bRunning)
        {
            tracer.tracef(ERR, "SourceThreadStopped : not running\n");
            result = -10;
            break;
        }
        returnCode = EndAudioCapture();
        if (returnCode != 0)
        {
            tracer.tracef(ERR, "SourceThreadStopped : e-EndAudioCapture : %d 0x%x\n", returnCode, returnCode);
            result = -20;
            break;
        }
        needToSetVolume = false;
        bRunning = false;
    }
    while (false);
    LeaveCriticalSection(&filterMutex);
    tracer.tracef(ARB, "SourceThreadStopped : left filterMutex\n");
    tracer.tracef(SDI_LEVEL_ENTRY_EXIT, "~SourceThreadStopped : returning %d\n", result);
    return result;
}