// AudioSample.cpp: implementation of the AudioSample class.
//
//////////////////////////////////////////////////////////////////////
|
#include "RTPPacket.h"
|
#include "AudioSample.h"
|
#include "AudioSampleManager.h"
|
#include "Parameters.h"
|
|
//////////////////////////////////////////////////////////////////////
|
// Construction/Destruction
|
//////////////////////////////////////////////////////////////////////
|
|
// Construct a sample backed by a buffer of the requested size.
// FIX: removed the default argument from this out-of-line definition —
// a default belongs in the class declaration only (repeating it here is
// ill-formed, and it also made one-argument construction ambiguous with
// AudioSample(AudioSampleManager *)).
AudioSample::AudioSample(AudioSampleManager *audioSampleManager, int size)
{
    // The mutex must exist before Initialize(), which locks it.
    InitializeCriticalSection(&sampleMutex);
    Initialize(audioSampleManager, size);
}
|
|
|
// Construct a sample with the default buffer size.
AudioSample::AudioSample(AudioSampleManager *audioSampleManager)
{
    // The mutex must exist before Initialize(), which locks it.
    InitializeCriticalSection(&sampleMutex);
    Initialize(audioSampleManager, DEFAULT_AUDIOSAMPLE_BUFFERSIZE);
}
|
|
// Destructor: releases the payload buffer and any attached RTP header,
// then tears down the mutex that guarded them.
AudioSample::~AudioSample()
{
    EnterCriticalSection(&sampleMutex);
    // delete / delete[] on a null pointer is a no-op, so no guards needed.
    delete [] data;
    delete rtpHeader;
    LeaveCriticalSection(&sampleMutex);
    DeleteCriticalSection(&sampleMutex);
}
|
|
bool
|
AudioSample::operator < (AudioSample &a )
|
{
|
if (!a.RTPHeader() || !this->RTPHeader())
|
{
|
return false;
|
}
|
unsigned long timestamp1 = (a.RTPHeader())->Timestamp();
|
unsigned long timestamp2 = (this->RTPHeader())->Timestamp();
|
unsigned long timediff = timestamp1 > timestamp2 ? timestamp1 - timestamp2 : timestamp2 - timestamp1;
|
if (timestamp2 != timestamp1)
|
{
|
|
return timediff < 0x8fffffff ? (timestamp2 < timestamp1) : (timestamp1 < timestamp2);
|
}
|
else
|
{
|
unsigned short seqno1 = (a.RTPHeader())->SeqNo();
|
unsigned short seqno2 = (this->RTPHeader())->SeqNo();
|
unsigned short seqdiff = seqno1 > seqno2 ? seqno1 - seqno2 : seqno2 - seqno1;
|
return seqdiff < 0x8fff ? (seqno2 < seqno1) : (seqno1 < seqno2);
|
}
|
}
|
|
bool
|
AudioSample::operator >= (AudioSample &a )
|
{
|
return !(*this < a);
|
}
|
|
int
|
AudioSample::Initialize(AudioSampleManager *audioSampleManager, int size)
|
{
|
EnterCriticalSection(&sampleMutex);
|
data = new char[size];
|
rtpHeader = NULL;
|
bufferSize = size;
|
dataSize = 0;
|
refCount = 0;
|
manager = audioSampleManager;
|
char subFacilityName[100];
|
sprintf(subFacilityName, "AudioSample:%x", this);
|
tracer.SetSubFacilityName(subFacilityName);
|
SetTraceLevel();
|
LeaveCriticalSection(&sampleMutex);
|
return 0;
|
}
|
|
int
|
AudioSample::Uninitialize()
|
{
|
if (rtpHeader)
|
{
|
delete rtpHeader;
|
rtpHeader = NULL;
|
dataSize = 0;
|
}
|
return 0;
|
}
|
|
int
|
AudioSample::SetTraceLevel()
|
{
|
long SystemMask = 0;
|
if ((SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "AllComponents", 0x0)) == 0)
|
{
|
SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "AudioSample", EE);
|
}
|
tracer.SetSystemMask(SystemMask);
|
return 0;
|
}
|
|
// Returns the allocated capacity of the payload buffer, in bytes.
int
AudioSample::BufferSize()
{
    return bufferSize;
}
|
|
// Returns the number of valid payload bytes currently stored
// (as last set by SetDataSize()).
int
AudioSample::DataSize()
{
    return dataSize;
}
|
|
int
|
AudioSample::SilenceSize()
|
{
|
return (silenceLengthMilliseconds * 1000 * minFrameSize) / minFrameDuration; // silenceDuration is in millisecs, minFrameDuration is in microsecs
|
}
|
|
// Raw pointer to the payload buffer. The sample retains ownership;
// NOTE(review): access is not serialized by sampleMutex — callers
// presumably coordinate via the refCount protocol; verify.
char *
AudioSample::Data()
{
    return data;
}
|
|
// Thread-safe snapshot of the attached RTP header pointer (may be NULL).
// The sample keeps ownership of the returned packet.
RTPPacket *
AudioSample::RTPHeader()
{
    RTPPacket *snapshot;
    EnterCriticalSection(&sampleMutex);
    snapshot = rtpHeader;
    LeaveCriticalSection(&sampleMutex);
    return snapshot;
}
|
|
int
|
AudioSample::SetRTPHeader(RTPPacket *headerPtr)
|
{
|
EnterCriticalSection(&sampleMutex);
|
if (rtpHeader)
|
{
|
delete rtpHeader;
|
}
|
rtpHeader = headerPtr;
|
LeaveCriticalSection(&sampleMutex);
|
return 0;
|
}
|
|
int
|
AudioSample::AddRef()
|
{
|
EnterCriticalSection(&sampleMutex);
|
int result(0);
|
result = ++refCount;
|
LeaveCriticalSection(&sampleMutex);
|
return result;
|
}
|
|
int
|
AudioSample::AddRef(TraceUser *container)
|
{
|
EnterCriticalSection(&sampleMutex);
|
int result(0);
|
if (refCount == 0)
|
{
|
tracer.tracef(ERR, "AddRef : by 0x%x:%s on sample 0x%x with refCount = 0 !\n", container, container->TraceName(), this);
|
result = -10;
|
refCount = 1000;
|
Sleep(500);
|
}
|
else
|
{
|
result = ++refCount;
|
}
|
LeaveCriticalSection(&sampleMutex);
|
return result;
|
}
|
|
int
|
AudioSample::Release()
|
{
|
EnterCriticalSection(&sampleMutex);
|
int result = 0;
|
if (refCount > 0)
|
{
|
result = --refCount;
|
if (refCount == 0)
|
{
|
Uninitialize();
|
manager->AudioSampleReleased(this);
|
}
|
}
|
else
|
{
|
tracer.tracef(ERR, "Release : e-refCount : Error : refCount = %d\n", refCount);
|
result = -10;
|
Sleep(500);
|
}
|
LeaveCriticalSection(&sampleMutex);
|
return result;
|
}
|
|
int
|
AudioSample::Release(TraceUser *container)
|
{
|
EnterCriticalSection(&sampleMutex);
|
int result = 0;
|
if (refCount > 0)
|
{
|
result = --refCount;
|
if (refCount == 0)
|
{
|
#ifdef DEBUG_AUDIOSAMPLES
|
tracer.tracef(EE, "Release : Sample 0x%x returned to manager by 0x%x : %s\n", this, container, container->TraceName());
|
#endif
|
Uninitialize();
|
manager->AudioSampleReleased(this);
|
}
|
}
|
else
|
{
|
tracer.tracef(ERR, "Release : Sample 0x%x has invalid refCount ! by 0x%x : %s\n", this, container, container->TraceName());
|
refCount = 1000;
|
result = -10;
|
Sleep(500);
|
}
|
LeaveCriticalSection(&sampleMutex);
|
return result;
|
}
|
|
int
|
AudioSample::GetFormat(WAVEFORMATEX *waveFormat)
|
{
|
if (waveFormat != NULL)
|
{
|
*waveFormat = format;
|
}
|
return 0;
|
}
|
|
int
|
AudioSample::SetFormat(WAVEFORMATEX &waveFormat)
|
{
|
if (refCount <= 1)
|
{
|
format = waveFormat;
|
switch(format.wFormatTag)
|
{
|
case WAVE_FORMAT_PCM:
|
minFrameDuration = 1000000 / format.nSamplesPerSec; // min frame duration in microseconds
|
minFrameSize = (format.wBitsPerSample * format.nChannels) / 8; // min frame size in bytes
|
break;
|
case WAVE_FORMAT_MULAW:
|
case WAVE_FORMAT_ALAW:
|
minFrameDuration = 125;
|
minFrameSize = 1;
|
break;
|
case WAVE_FORMAT_CISCO_G729:
|
minFrameDuration = 10000; // 10ms
|
minFrameSize = 10; // 10 bytes
|
break;
|
case WAVE_FORMAT_CISCO_G723_53:
|
minFrameDuration = 30000;
|
minFrameSize = 20;
|
break;
|
case WAVE_FORMAT_CISCO_G723_63:
|
minFrameDuration = 30000;
|
minFrameSize = 24;
|
break;
|
default:
|
break;
|
}
|
}
|
else
|
{
|
tracer.tracef(ERR, "SetFormat : e-refCount : Error : refCount = %d\n", refCount);
|
}
|
return 0;
|
}
|
|
// Returns the minimum frame duration in microseconds, as derived by
// SetFormat() (0 until a recognized format has been set).
int
AudioSample::MinFrameDuration()
{
    return minFrameDuration;
}
|
|
// Returns the minimum frame size in bytes, as derived by SetFormat()
// (0 until a recognized format has been set).
int
AudioSample::MinFrameSize()
{
    return minFrameSize;
}
|
|
int
|
AudioSample::SetDataSize(int size)
|
{
|
dataSize = size;
|
if (dataSize > 0)
|
{
|
SetSilenceDuration(0);
|
}
|
return 0;
|
}
|
|
// Returns the recorded silence duration in milliseconds.
int
AudioSample::GetSilenceDuration()
{
    // tracer.tracef(DET, "GetSilenceDuration : silenceLengthMilliseconds = %d\n", silenceLengthMilliseconds);
    return silenceLengthMilliseconds;
}
|
|
// Record a silence duration (milliseconds) for this sample; cleared by
// SetDataSize() whenever real payload arrives. Always returns 0.
int
AudioSample::SetSilenceDuration(int milliseconds)
{
    silenceLengthMilliseconds = milliseconds;
    return 0;
}
|
|
/**
|
Returns the length of audio in this frame in milliseconds
|
*/
|
int
|
AudioSample::GetDuration()
|
{
|
// tracer.tracef(DET, "GetDuration : minFrameDuration = %d, dataSize = %d, minFrameSize = %d\n", minFrameDuration, dataSize, minFrameSize);
|
return ((minFrameDuration * dataSize) / minFrameSize) / 1000; // minFrameDuration is in microsec, GetDuration returns millisec
|
}
|