// RTPJitterBuffer.cpp: implementation of the RTPJitterBuffer class.
|
//
|
//////////////////////////////////////////////////////////////////////
|
|
#include "RTPJitterBuffer.h"

#include <cstdio>

#include "AudioSample.h"
#include "AudioSampleManager.h"
#include "Parameters.h"
#include "RTPAudioStream.h"
#include "RTPPacket.h"
|
|
using namespace std;
|
|
//////////////////////////////////////////////////////////////////////
|
// Construction/Destruction
|
//////////////////////////////////////////////////////////////////////
|
|
// Constructs an idle (not running) jitter buffer with the default depth and
// output duration, and creates the two critical sections used to guard the
// stream map and the running state.
RTPJitterBuffer::RTPJitterBuffer()
{
    char subFacilityName[100];
    // Tag trace output with this instance's address so several jitter
    // buffers can be told apart in the logs.
    // FIX: snprintf + %p replaces sprintf + %x — %x truncates a pointer on
    // 64-bit builds (and is undefined for pointer arguments), and sprintf
    // has no bounds checking on the destination buffer.
    snprintf(subFacilityName, sizeof(subFacilityName), "RTPJitterBuffer:%p", static_cast<void *>(this));
    tracer.SetSubFacilityName(subFacilityName);
    SetTraceLevel();

    jitterBufferDepth = DEFAULT_JITTERBUFFER_SIZE;
    outputDuration = DEFAULT_JITTERBUFFER_OUTPUTDURATION;
    activeAudioStream = NULL;
    numActiveStreamNULLs = 0;
    bRunning = false;

    InitializeCriticalSection(&rtpAudioStreamsMutex);
    InitializeCriticalSection(&stateMutex);
}
|
|
// Tears down the jitter buffer: frees every queued RTP stream, then destroys
// the critical sections created by the constructor.
RTPJitterBuffer::~RTPJitterBuffer()
{
    tracer.tracef(EE, "RTPJitterBuffer~\n");
    // Clear() acquires rtpAudioStreamsMutex internally, so it must complete
    // before that critical section is deleted below.
    Clear();
    DeleteCriticalSection(&rtpAudioStreamsMutex);
    DeleteCriticalSection(&stateMutex);
    tracer.tracef(EE, "~RTPJitterBuffer~\n");
}
|
|
int
|
RTPJitterBuffer::SetTraceLevel()
|
{
|
long SystemMask = 0;
|
if ((SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "AllComponents", 0x0)) == 0)
|
{
|
SystemMask = GetRegKeyLong(HKEY_CURRENT_USER, "Software\\Cisco Systems\\MTC\\Tracing", "RTPJitterBuffer", 0x100000);
|
}
|
tracer.SetSystemMask(SystemMask);
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::SetJitterBufferDepth(int depth)
|
{
|
jitterBufferDepth = depth;
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::SetOutputDuration(int outputDuration)
|
{
|
this->outputDuration = outputDuration;
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::Clear()
|
{
|
EnterCriticalSection(&rtpAudioStreamsMutex);
|
while(rtpAudioStreams.size() > 0)
|
{
|
RTPAudioStream *audioStream = (*(rtpAudioStreams.begin())).second;
|
rtpAudioStreams.erase(rtpAudioStreams.begin());
|
tracer.tracef(EE, "Clear : deleting audio stream 0x%x ssrc = %u\n", audioStream, audioStream->SSRC());
|
delete audioStream;
|
}
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::ChooseNewActiveAudioStream()
|
{
|
tracer.tracef(DET, "ChooseNewActiveStream\n");
|
tracer.tracef(DET, "ChooseNewActiveStream : entering rtpAudioStreamsMutex\n");
|
EnterCriticalSection(&rtpAudioStreamsMutex);
|
numActiveStreamNULLs = 0;
|
if (activeAudioStream)
|
{
|
tracer.tracef(DET, "ChooseNewActiveStream : deleting current active stream\n");
|
rtpAudioStreams.erase(activeAudioStream->SSRC());
|
delete activeAudioStream;
|
activeAudioStream = NULL;
|
}
|
AUDIO_STREAM_MAP::iterator iter = rtpAudioStreams.begin();
|
while (iter != rtpAudioStreams.end())
|
{
|
RTPAudioStream *rtpAudioStream = (*iter).second;
|
tracer.tracef(EE, "ChooseNewActiveStream : found RTP stream : SSRC %u, %d packets, %d ms queued\n", rtpAudioStream->SSRC(), rtpAudioStream->NumQueuedPackets(), rtpAudioStream->JitterBufferDepth());
|
if (rtpAudioStream->JitterBufferDepth() >= jitterBufferDepth)
|
{
|
activeAudioStream = rtpAudioStream;
|
rtpAudioStreams.erase(activeAudioStream->SSRC());
|
tracer.tracef(EE, "ChooseNewActiveStream : stream 0x%x with SSRC=%u, %d packets, %dms chosen as new active stream\n", activeAudioStream, activeAudioStream->SSRC(), activeAudioStream->NumQueuedPackets(), activeAudioStream->JitterBufferDepth());
|
break;
|
}
|
iter++;
|
}
|
if (activeAudioStream)
|
{
|
// remove all other rtp streams since we chose one stream as active
|
Clear();
|
rtpAudioStreams[activeAudioStream->SSRC()] = activeAudioStream;
|
}
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
tracer.tracef(DET, "ChooseNewActiveStream : left rtpAudioStreamsMutex\n");
|
tracer.tracef(DET, "~ChooseNewActiveStream\n");
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::TransformStarted()
|
{
|
tracer.tracef(EE, "TransformStarted\n");
|
AudioTransformer::TransformStarted();
|
EnterCriticalSection(&stateMutex);
|
if (bRunning)
|
{
|
tracer.tracef(ARB, "TransformStarted : Already Running\n");
|
}
|
else
|
{
|
EnterCriticalSection(&rtpAudioStreamsMutex);
|
Clear();
|
activeAudioStream = NULL;
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
bRunning = true;
|
}
|
LeaveCriticalSection(&stateMutex);
|
tracer.tracef(EE, "~TransformStarted\n");
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::TransformStopped()
|
{
|
tracer.tracef(EE, "TransformStopped\n");
|
AudioTransformer::TransformStopped();
|
EnterCriticalSection(&stateMutex);
|
bRunning = false;
|
LeaveCriticalSection(&stateMutex);
|
tracer.tracef(EE, "~TransformStopped\n");
|
return 0;
|
}
|
|
// Intentional no-op: this class does not transform samples in place —
// input arrives through RenderAudioSamples() and output is produced by
// GenerateData().  Always returns 0 and leaves *ppAudioSample untouched.
int
RTPJitterBuffer::TransformAudioSamples(std::vector<std::pair<AudioSample *, AudioSource *> > &data, AudioSample **ppAudioSample)
{
    return 0;
}
|
|
int
|
RTPJitterBuffer::RenderAudioSamples(std::vector<std::pair<AudioSample *, AudioSource *> > &data)
|
{
|
tracer.tracef(DET, "RenderAudioSamples\n");
|
EnterCriticalSection(&stateMutex);
|
if (!bRunning)
|
{
|
LeaveCriticalSection(&stateMutex);
|
tracer.tracef(EE, "RenderAudioSamples : bRunning = false. Sleeping %d\n", outputDuration);
|
Sleep(outputDuration);
|
}
|
else
|
{
|
LeaveCriticalSection(&stateMutex);
|
AudioSample *audioSample = NULL;
|
if (data.size() > 0)
|
{
|
audioSample = data[0].first;
|
}
|
if (audioSample)
|
{
|
RTPPacket *packetHeader = audioSample->RTPHeader();
|
if (!packetHeader)
|
{
|
tracer.tracef(DET, "~RenderAudioSamples : packet with no RTP header\n");
|
return -10;
|
}
|
unsigned long ssrc = packetHeader->Ssrc();
|
tracer.tracef(ARB, "RenderAudioSamples : RTP packet : ssrc %u, seqno %u, timestamp %u\n", ssrc, packetHeader->SeqNo(), packetHeader->Timestamp());
|
tracer.tracef(DET, "RenderAudioSamples : Entering rtpAudioStreamsMutex\n");
|
EnterCriticalSection(&rtpAudioStreamsMutex);
|
AUDIO_STREAM_MAP::iterator iter;
|
iter = rtpAudioStreams.find(ssrc);
|
if (iter == rtpAudioStreams.end())
|
{
|
RTPAudioStream *rtpAudioStream = new RTPAudioStream(ssrc);
|
tracer.tracef(SIG, "RenderAudioSamples : no previous stream with ssrc %d. created new one 0x%x\n", ssrc, rtpAudioStream);
|
if (!rtpAudioStream)
|
{
|
return -20;
|
}
|
rtpAudioStreams[ssrc] = rtpAudioStream;
|
iter = rtpAudioStreams.find(ssrc);
|
}
|
((*iter).second)->InsertRTPPacket(audioSample);
|
tracer.tracef(ARB, "RenderAudioSamples : put packet into RTP stream 0x%x: SSRC %d, %d ms in %d packets\n", (*iter).second, ((*iter).second)->SSRC(), ((*iter).second)->JitterBufferDepth(), ((*iter).second)->NumQueuedPackets());
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
tracer.tracef(DET, "RenderAudioSamples : Left rtpAudioStreamsMutex\n");
|
}
|
}
|
tracer.tracef(DET, "~RenderAudioSamples\n");
|
return 0;
|
}
|
|
int
|
RTPJitterBuffer::GenerateData(AudioSample **ppAudioSample)
|
{
|
tracer.tracef(DET, "GenerateData\n");
|
int result = 0;
|
*ppAudioSample = NULL;
|
|
EnterCriticalSection(&stateMutex);
|
if (!bRunning)
|
{
|
LeaveCriticalSection(&stateMutex);
|
tracer.tracef(EE, "GenerateData : bRunning = false\n");
|
}
|
else
|
{
|
LeaveCriticalSection(&stateMutex);
|
EnterCriticalSection(&rtpAudioStreamsMutex);
|
if (activeAudioStream == NULL ||
|
activeAudioStream->NumQueuedPackets() < 1 ||
|
numActiveStreamNULLs > 5)
|
{
|
tracer.tracef(ARB, "GenerateData : Choosing new active stream\n");
|
ChooseNewActiveAudioStream();
|
}
|
if (activeAudioStream)
|
{
|
tracer.tracef(ARB, "GenerateData : activeAudioStream is SSRC %d, %d ms in %d packets\n", activeAudioStream->SSRC(), activeAudioStream->JitterBufferDepth(), activeAudioStream->NumQueuedPackets());
|
activeAudioStream->GiveNextRTPPacket(ppAudioSample);
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
if (*ppAudioSample == NULL)
|
{
|
numActiveStreamNULLs++;
|
tracer.tracef(ARB, "GenerateData : GiveNextRTPPacket returned NULL %d times. sending silence\n", numActiveStreamNULLs);
|
(AudioSampleManager::GetInstance())->GetAudioSample(ppAudioSample, this);
|
(*ppAudioSample)->SetDataSize(0);
|
(*ppAudioSample)->SetSilenceDuration(outputDuration);
|
(*ppAudioSample)->SetFormat(WaveFormat::GetWaveFormat(WaveFormat_PCM_16_8_1));
|
}
|
else
|
{
|
numActiveStreamNULLs = 0;
|
RTPPacket *rtpHeader = (*ppAudioSample)->RTPHeader();
|
tracer.tracef(ARB, "GenerateData : packet : ssrc %u, seqno %u\n", rtpHeader->Ssrc(), rtpHeader->SeqNo());
|
}
|
}
|
else
|
{
|
LeaveCriticalSection(&rtpAudioStreamsMutex);
|
tracer.tracef(ARB, "GenerateData : sending silence\n");
|
(AudioSampleManager::GetInstance())->GetAudioSample(ppAudioSample, this);
|
(*ppAudioSample)->SetDataSize(0);
|
(*ppAudioSample)->SetSilenceDuration(outputDuration);
|
(*ppAudioSample)->SetFormat(WaveFormat::GetWaveFormat(WaveFormat_PCM_16_8_1));
|
}
|
}
|
tracer.tracef(DET, "~GenerateData\n");
|
return 0;
|
}
|