[Reland] Cleanup of the AudioDeviceBuffer class.

See https://codereview.webrtc.org/2256833003/

Contains a minor change to ensure that an external client builds.

TBR=magjed
BUG=NONE

Review-Url: https://codereview.webrtc.org/2269553004
Cr-Commit-Position: refs/heads/master@{#13845}
henrika 2016-08-22 05:56:12 -07:00 committed by Commit bot
parent 83d79cd4a2
commit 49810511c9
2 changed files with 193 additions and 233 deletions
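The core of the cleanup, visible in the diffs below, is to replace the fixed-size member buffers with buffers allocated on demand for exactly 10ms of audio, which means the native audio layer must deliver and request audio in exact 10ms chunks. A hedged sketch of how a platform capture callback could drive the recording path after this change; the helper function, the include path, and the zero delay values are illustrative assumptions, not part of the patch:

// Illustrative capture-side driver (not part of this CL). Assumes the
// platform layer already splits its input into 10 ms chunks of interleaved
// 16-bit PCM; the include path is assumed from the 2016 source layout.
#include "webrtc/modules/audio_device/audio_device_buffer.h"

void OnCaptured10MsOfAudio(webrtc::AudioDeviceBuffer* adb,
                           const int16_t* interleaved_pcm,
                           size_t samples_per_channel) {
  // The new SetRecordedBuffer() CHECKs that exactly 10 ms worth of samples
  // is delivered, so anything else must be re-chunked before this call.
  adb->SetRecordedBuffer(interleaved_pcm, samples_per_channel);
  // Real platform layers pass measured render/capture delays for AEC;
  // zeros are placeholders here.
  adb->SetVQEData(/*play_delay_ms=*/0, /*rec_delay_ms=*/0, /*clock_drift=*/0);
  adb->DeliverRecordedData();
}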

audio_device_buffer.cc

@@ -22,9 +22,6 @@
namespace webrtc {
static const int kHighDelayThresholdMs = 300;
static const int kLogHighDelayIntervalFrames = 500; // 5 seconds.
static const char kTimerQueueName[] = "AudioDeviceBufferTimer";
// Time between two successive calls to LogStats().
@@ -33,30 +30,26 @@ static const size_t kTimerIntervalInMilliseconds =
kTimerIntervalInSeconds * rtc::kNumMillisecsPerSec;
AudioDeviceBuffer::AudioDeviceBuffer()
: _ptrCbAudioTransport(nullptr),
: audio_transport_cb_(nullptr),
task_queue_(kTimerQueueName),
timer_has_started_(false),
_recSampleRate(0),
_playSampleRate(0),
_recChannels(0),
_playChannels(0),
_recChannel(AudioDeviceModule::kChannelBoth),
_recBytesPerSample(0),
_playBytesPerSample(0),
_recSamples(0),
_recSize(0),
_playSamples(0),
_playSize(0),
_recFile(*FileWrapper::Create()),
_playFile(*FileWrapper::Create()),
_currentMicLevel(0),
_newMicLevel(0),
_typingStatus(false),
_playDelayMS(0),
_recDelayMS(0),
_clockDrift(0),
// Set to the interval in order to log on the first occurrence.
high_delay_counter_(kLogHighDelayIntervalFrames),
rec_sample_rate_(0),
play_sample_rate_(0),
rec_channels_(0),
play_channels_(0),
rec_channel_(AudioDeviceModule::kChannelBoth),
rec_bytes_per_sample_(0),
play_bytes_per_sample_(0),
rec_samples_per_10ms_(0),
rec_bytes_per_10ms_(0),
play_samples_per_10ms_(0),
play_bytes_per_10ms_(0),
current_mic_level_(0),
new_mic_level_(0),
typing_status_(false),
play_delay_ms_(0),
rec_delay_ms_(0),
clock_drift_(0),
num_stat_reports_(0),
rec_callbacks_(0),
last_rec_callbacks_(0),
@@ -68,8 +61,6 @@ AudioDeviceBuffer::AudioDeviceBuffer()
last_play_samples_(0),
last_log_stat_time_(0) {
LOG(INFO) << "AudioDeviceBuffer::ctor";
memset(_recBuffer, 0, kMaxBufferSizeBytes);
memset(_playBuffer, 0, kMaxBufferSizeBytes);
}
AudioDeviceBuffer::~AudioDeviceBuffer() {
@@ -93,27 +84,19 @@ AudioDeviceBuffer::~AudioDeviceBuffer() {
LOG(INFO) << "average: "
<< static_cast<float>(total_diff_time) / num_measurements;
}
_recFile.Flush();
_recFile.CloseFile();
delete &_recFile;
_playFile.Flush();
_playFile.CloseFile();
delete &_playFile;
}
int32_t AudioDeviceBuffer::RegisterAudioCallback(
AudioTransport* audioCallback) {
AudioTransport* audio_callback) {
LOG(INFO) << __FUNCTION__;
rtc::CritScope lock(&_critSectCb);
_ptrCbAudioTransport = audioCallback;
audio_transport_cb_ = audio_callback;
return 0;
}
int32_t AudioDeviceBuffer::InitPlayout() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
LOG(INFO) << __FUNCTION__;
RTC_DCHECK(thread_checker_.CalledOnValidThread());
last_playout_time_ = rtc::TimeMillis();
if (!timer_has_started_) {
StartTimer();
@@ -123,8 +106,8 @@ int32_t AudioDeviceBuffer::InitPlayout() {
}
int32_t AudioDeviceBuffer::InitRecording() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
LOG(INFO) << __FUNCTION__;
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!timer_has_started_) {
StartTimer();
timer_has_started_ = true;
@@ -135,38 +118,40 @@ int32_t AudioDeviceBuffer::InitRecording() {
int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
LOG(INFO) << "SetRecordingSampleRate(" << fsHz << ")";
rtc::CritScope lock(&_critSect);
_recSampleRate = fsHz;
rec_sample_rate_ = fsHz;
return 0;
}
int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
LOG(INFO) << "SetPlayoutSampleRate(" << fsHz << ")";
rtc::CritScope lock(&_critSect);
_playSampleRate = fsHz;
play_sample_rate_ = fsHz;
return 0;
}
int32_t AudioDeviceBuffer::RecordingSampleRate() const {
return _recSampleRate;
return rec_sample_rate_;
}
int32_t AudioDeviceBuffer::PlayoutSampleRate() const {
return _playSampleRate;
return play_sample_rate_;
}
int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) {
LOG(INFO) << "SetRecordingChannels(" << channels << ")";
rtc::CritScope lock(&_critSect);
_recChannels = channels;
_recBytesPerSample =
rec_channels_ = channels;
rec_bytes_per_sample_ =
2 * channels; // 16 bits per sample in mono, 32 bits in stereo
return 0;
}
int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) {
LOG(INFO) << "SetPlayoutChannels(" << channels << ")";
rtc::CritScope lock(&_critSect);
_playChannels = channels;
play_channels_ = channels;
// 16 bits per sample in mono, 32 bits in stereo
_playBytesPerSample = 2 * channels;
play_bytes_per_sample_ = 2 * channels;
return 0;
}
@@ -174,135 +159,101 @@ int32_t AudioDeviceBuffer::SetRecordingChannel(
const AudioDeviceModule::ChannelType channel) {
rtc::CritScope lock(&_critSect);
if (_recChannels == 1) {
if (rec_channels_ == 1) {
return -1;
}
if (channel == AudioDeviceModule::kChannelBoth) {
// two bytes per channel
_recBytesPerSample = 4;
rec_bytes_per_sample_ = 4;
} else {
// only utilize one out of two possible channels (left or right)
_recBytesPerSample = 2;
rec_bytes_per_sample_ = 2;
}
_recChannel = channel;
rec_channel_ = channel;
return 0;
}
int32_t AudioDeviceBuffer::RecordingChannel(
AudioDeviceModule::ChannelType& channel) const {
channel = _recChannel;
channel = rec_channel_;
return 0;
}
size_t AudioDeviceBuffer::RecordingChannels() const {
return _recChannels;
return rec_channels_;
}
size_t AudioDeviceBuffer::PlayoutChannels() const {
return _playChannels;
return play_channels_;
}
int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) {
_currentMicLevel = level;
current_mic_level_ = level;
return 0;
}
int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) {
_typingStatus = typingStatus;
int32_t AudioDeviceBuffer::SetTypingStatus(bool typing_status) {
typing_status_ = typing_status;
return 0;
}
uint32_t AudioDeviceBuffer::NewMicLevel() const {
return _newMicLevel;
return new_mic_level_;
}
void AudioDeviceBuffer::SetVQEData(int playDelayMs,
int recDelayMs,
int clockDrift) {
if (high_delay_counter_ < kLogHighDelayIntervalFrames) {
++high_delay_counter_;
} else {
if (playDelayMs + recDelayMs > kHighDelayThresholdMs) {
high_delay_counter_ = 0;
LOG(LS_WARNING) << "High audio device delay reported (render="
<< playDelayMs << " ms, capture=" << recDelayMs << " ms)";
}
}
_playDelayMS = playDelayMs;
_recDelayMS = recDelayMs;
_clockDrift = clockDrift;
void AudioDeviceBuffer::SetVQEData(int play_delay_ms,
int rec_delay_ms,
int clock_drift) {
play_delay_ms_ = play_delay_ms;
rec_delay_ms_ = rec_delay_ms;
clock_drift_ = clock_drift;
}
int32_t AudioDeviceBuffer::StartInputFileRecording(
const char fileName[kAdmMaxFileNameSize]) {
rtc::CritScope lock(&_critSect);
_recFile.Flush();
_recFile.CloseFile();
return _recFile.OpenFile(fileName, false) ? 0 : -1;
LOG(LS_WARNING) << "Not implemented";
return 0;
}
int32_t AudioDeviceBuffer::StopInputFileRecording() {
rtc::CritScope lock(&_critSect);
_recFile.Flush();
_recFile.CloseFile();
LOG(LS_WARNING) << "Not implemented";
return 0;
}
int32_t AudioDeviceBuffer::StartOutputFileRecording(
const char fileName[kAdmMaxFileNameSize]) {
rtc::CritScope lock(&_critSect);
_playFile.Flush();
_playFile.CloseFile();
return _playFile.OpenFile(fileName, false) ? 0 : -1;
}
int32_t AudioDeviceBuffer::StopOutputFileRecording() {
rtc::CritScope lock(&_critSect);
_playFile.Flush();
_playFile.CloseFile();
LOG(LS_WARNING) << "Not implemented";
return 0;
}
int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
size_t nSamples) {
int32_t AudioDeviceBuffer::StopOutputFileRecording() {
LOG(LS_WARNING) << "Not implemented";
return 0;
}
int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
size_t num_samples) {
AllocateRecordingBufferIfNeeded();
RTC_CHECK(rec_buffer_);
// WebRTC can only receive audio in 10ms chunks, hence we fail if the native
// audio layer tries to deliver something else.
RTC_CHECK_EQ(num_samples, rec_samples_per_10ms_);
rtc::CritScope lock(&_critSect);
if (_recBytesPerSample == 0) {
assert(false);
return -1;
}
_recSamples = nSamples;
_recSize = _recBytesPerSample * nSamples; // {2,4}*nSamples
if (_recSize > kMaxBufferSizeBytes) {
assert(false);
return -1;
}
if (_recChannel == AudioDeviceModule::kChannelBoth) {
// (default) copy the complete input buffer to the local buffer
memcpy(&_recBuffer[0], audioBuffer, _recSize);
if (rec_channel_ == AudioDeviceModule::kChannelBoth) {
// Copy the complete input buffer to the local buffer.
memcpy(&rec_buffer_[0], audio_buffer, rec_bytes_per_10ms_);
} else {
int16_t* ptr16In = (int16_t*)audioBuffer;
int16_t* ptr16Out = (int16_t*)&_recBuffer[0];
if (AudioDeviceModule::kChannelRight == _recChannel) {
int16_t* ptr16In = (int16_t*)audio_buffer;
int16_t* ptr16Out = (int16_t*)&rec_buffer_[0];
if (AudioDeviceModule::kChannelRight == rec_channel_) {
ptr16In++;
}
// exctract left or right channel from input buffer to the local buffer
for (size_t i = 0; i < _recSamples; i++) {
// Extract left or right channel from input buffer to the local buffer.
for (size_t i = 0; i < rec_samples_per_10ms_; i++) {
*ptr16Out = *ptr16In;
ptr16Out++;
ptr16In++;
@@ -310,52 +261,40 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
}
}
if (_recFile.is_open()) {
// write to binary file in mono or stereo (interleaved)
_recFile.Write(&_recBuffer[0], _recSize);
}
// Update some stats but do it on the task queue to ensure that the members
// are modified and read on the same thread.
task_queue_.PostTask(
rtc::Bind(&AudioDeviceBuffer::UpdateRecStats, this, nSamples));
rtc::Bind(&AudioDeviceBuffer::UpdateRecStats, this, num_samples));
return 0;
}
int32_t AudioDeviceBuffer::DeliverRecordedData() {
RTC_CHECK(rec_buffer_);
RTC_DCHECK(audio_transport_cb_);
rtc::CritScope lock(&_critSectCb);
// Ensure that user has initialized all essential members
if ((_recSampleRate == 0) || (_recSamples == 0) ||
(_recBytesPerSample == 0) || (_recChannels == 0)) {
RTC_NOTREACHED();
return -1;
}
if (!_ptrCbAudioTransport) {
if (!audio_transport_cb_) {
LOG(LS_WARNING) << "Invalid audio transport";
return 0;
}
int32_t res(0);
uint32_t newMicLevel(0);
uint32_t totalDelayMS = _playDelayMS + _recDelayMS;
res = _ptrCbAudioTransport->RecordedDataIsAvailable(
&_recBuffer[0], _recSamples, _recBytesPerSample, _recChannels,
_recSampleRate, totalDelayMS, _clockDrift, _currentMicLevel,
_typingStatus, newMicLevel);
uint32_t totalDelayMS = play_delay_ms_ + rec_delay_ms_;
res = audio_transport_cb_->RecordedDataIsAvailable(
&rec_buffer_[0], rec_samples_per_10ms_, rec_bytes_per_sample_,
rec_channels_, rec_sample_rate_, totalDelayMS, clock_drift_,
current_mic_level_, typing_status_, newMicLevel);
if (res != -1) {
_newMicLevel = newMicLevel;
new_mic_level_ = newMicLevel;
} else {
LOG(LS_ERROR) << "RecordedDataIsAvailable() failed";
}
return 0;
}
int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
uint32_t playSampleRate = 0;
size_t playBytesPerSample = 0;
size_t playChannels = 0;
int32_t AudioDeviceBuffer::RequestPlayoutData(size_t num_samples) {
// Measure time since last function call and update an array where the
// position/index corresponds to time differences (in milliseconds) between
// two successive playout callbacks, and the stored value is the number of
@@ -367,37 +306,17 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
last_playout_time_ = now_time;
playout_diff_times_[diff_time]++;
// TOOD(henrika): improve bad locking model and make it more clear that only
// 10ms buffer sizes is supported in WebRTC.
{
rtc::CritScope lock(&_critSect);
// Store copies under lock and use copies hereafter to avoid race with
// setter methods.
playSampleRate = _playSampleRate;
playBytesPerSample = _playBytesPerSample;
playChannels = _playChannels;
// Ensure that user has initialized all essential members
if ((playBytesPerSample == 0) || (playChannels == 0) ||
(playSampleRate == 0)) {
RTC_NOTREACHED();
return -1;
}
_playSamples = nSamples;
_playSize = playBytesPerSample * nSamples; // {2,4}*nSamples
RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes);
RTC_CHECK_EQ(nSamples, _playSamples);
}
size_t nSamplesOut(0);
AllocatePlayoutBufferIfNeeded();
RTC_CHECK(play_buffer_);
// WebRTC can only provide audio in 10ms chunks, hence we fail if the native
// audio layer asks for something else.
RTC_CHECK_EQ(num_samples, play_samples_per_10ms_);
rtc::CritScope lock(&_critSectCb);
// It is currently supported to start playout without a valid audio
// transport object. Leads to warning and silence.
if (!_ptrCbAudioTransport) {
if (!audio_transport_cb_) {
LOG(LS_WARNING) << "Invalid audio transport";
return 0;
}
@@ -405,9 +324,11 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
uint32_t res(0);
int64_t elapsed_time_ms = -1;
int64_t ntp_time_ms = -1;
res = _ptrCbAudioTransport->NeedMorePlayData(
_playSamples, playBytesPerSample, playChannels, playSampleRate,
&_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms);
size_t num_samples_out(0);
res = audio_transport_cb_->NeedMorePlayData(
play_samples_per_10ms_, play_bytes_per_sample_, play_channels_,
play_sample_rate_, &play_buffer_[0], num_samples_out, &elapsed_time_ms,
&ntp_time_ms);
if (res != 0) {
LOG(LS_ERROR) << "NeedMorePlayData() failed";
}
@@ -415,23 +336,46 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
// Update some stats but do it on the task queue to ensure that access of
// members is serialized hence avoiding usage of locks.
task_queue_.PostTask(
rtc::Bind(&AudioDeviceBuffer::UpdatePlayStats, this, nSamplesOut));
return static_cast<int32_t>(nSamplesOut);
rtc::Bind(&AudioDeviceBuffer::UpdatePlayStats, this, num_samples_out));
return static_cast<int32_t>(num_samples_out);
}
int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) {
int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
rtc::CritScope lock(&_critSect);
RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes);
memcpy(audio_buffer, &play_buffer_[0], play_bytes_per_10ms_);
return static_cast<int32_t>(play_samples_per_10ms_);
}
memcpy(audioBuffer, &_playBuffer[0], _playSize);
void AudioDeviceBuffer::AllocatePlayoutBufferIfNeeded() {
RTC_CHECK(play_bytes_per_sample_);
if (play_buffer_)
return;
LOG(INFO) << __FUNCTION__;
rtc::CritScope lock(&_critSect);
// Derive the required buffer size given sample rate and number of channels.
play_samples_per_10ms_ = static_cast<size_t>(play_sample_rate_ * 10 / 1000);
play_bytes_per_10ms_ = play_bytes_per_sample_ * play_samples_per_10ms_;
LOG(INFO) << "playout samples per 10ms: " << play_samples_per_10ms_;
LOG(INFO) << "playout bytes per 10ms: " << play_bytes_per_10ms_;
// Allocate memory for the playout audio buffer. It will always contain audio
// samples corresponding to 10ms of audio to be played out.
play_buffer_.reset(new int8_t[play_bytes_per_10ms_]);
}
if (_playFile.is_open()) {
// write to binary file in mono or stereo (interleaved)
_playFile.Write(&_playBuffer[0], _playSize);
}
return static_cast<int32_t>(_playSamples);
void AudioDeviceBuffer::AllocateRecordingBufferIfNeeded() {
RTC_CHECK(rec_bytes_per_sample_);
if (rec_buffer_)
return;
LOG(INFO) << __FUNCTION__;
rtc::CritScope lock(&_critSect);
// Derive the required buffer size given sample rate and number of channels.
rec_samples_per_10ms_ = static_cast<size_t>(rec_sample_rate_ * 10 / 1000);
rec_bytes_per_10ms_ = rec_bytes_per_sample_ * rec_samples_per_10ms_;
LOG(INFO) << "recorded samples per 10ms: " << rec_samples_per_10ms_;
LOG(INFO) << "recorded bytes per 10ms: " << rec_bytes_per_10ms_;
// Allocate memory for the recording audio buffer. It will always contain
// audio samples corresponding to 10ms of audio.
rec_buffer_.reset(new int8_t[rec_bytes_per_10ms_]);
}
void AudioDeviceBuffer::StartTimer() {
@@ -455,7 +399,7 @@ void AudioDeviceBuffer::LogStats() {
uint32_t diff_samples = rec_samples_ - last_rec_samples_;
uint32_t rate = diff_samples / kTimerIntervalInSeconds;
LOG(INFO) << "[REC : " << time_since_last << "msec, "
<< _recSampleRate / 1000
<< rec_sample_rate_ / 1000
<< "kHz] callbacks: " << rec_callbacks_ - last_rec_callbacks_
<< ", "
<< "samples: " << diff_samples << ", "
@@ -464,7 +408,7 @@ void AudioDeviceBuffer::LogStats() {
diff_samples = play_samples_ - last_play_samples_;
rate = diff_samples / kTimerIntervalInSeconds;
LOG(INFO) << "[PLAY: " << time_since_last << "msec, "
<< _playSampleRate / 1000
<< play_sample_rate_ / 1000
<< "kHz] callbacks: " << play_callbacks_ - last_play_callbacks_
<< ", "
<< "samples: " << diff_samples << ", "

audio_device_buffer.h

@@ -21,11 +21,11 @@
namespace webrtc {
class CriticalSectionWrapper;
const uint32_t kPulsePeriodMs = 1000;
const size_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
// Delta times between two successive playout callbacks are limited to this
// value before added to an internal array.
const size_t kMaxDeltaTimeInMs = 500;
// TODO(henrika): remove when no longer used by external client.
const size_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
class AudioDeviceObserver;
@@ -35,40 +35,47 @@ class AudioDeviceBuffer {
virtual ~AudioDeviceBuffer();
void SetId(uint32_t id) {};
int32_t RegisterAudioCallback(AudioTransport* audioCallback);
int32_t RegisterAudioCallback(AudioTransport* audio_callback);
int32_t InitPlayout();
int32_t InitRecording();
virtual int32_t SetRecordingSampleRate(uint32_t fsHz);
virtual int32_t SetPlayoutSampleRate(uint32_t fsHz);
int32_t SetRecordingSampleRate(uint32_t fsHz);
int32_t SetPlayoutSampleRate(uint32_t fsHz);
int32_t RecordingSampleRate() const;
int32_t PlayoutSampleRate() const;
virtual int32_t SetRecordingChannels(size_t channels);
virtual int32_t SetPlayoutChannels(size_t channels);
int32_t SetRecordingChannels(size_t channels);
int32_t SetPlayoutChannels(size_t channels);
size_t RecordingChannels() const;
size_t PlayoutChannels() const;
int32_t SetRecordingChannel(const AudioDeviceModule::ChannelType channel);
int32_t RecordingChannel(AudioDeviceModule::ChannelType& channel) const;
virtual int32_t SetRecordedBuffer(const void* audioBuffer, size_t nSamples);
virtual int32_t SetRecordedBuffer(const void* audio_buffer,
size_t num_samples);
int32_t SetCurrentMicLevel(uint32_t level);
virtual void SetVQEData(int playDelayMS, int recDelayMS, int clockDrift);
virtual void SetVQEData(int play_delay_ms, int rec_delay_ms, int clock_drift);
virtual int32_t DeliverRecordedData();
uint32_t NewMicLevel() const;
virtual int32_t RequestPlayoutData(size_t nSamples);
virtual int32_t GetPlayoutData(void* audioBuffer);
virtual int32_t RequestPlayoutData(size_t num_samples);
virtual int32_t GetPlayoutData(void* audio_buffer);
// TODO(henrika): these methods should not be used and do not contain any
// valid implementation. Investigate the possibility to either remove them
// or add a proper implementation if needed.
int32_t StartInputFileRecording(const char fileName[kAdmMaxFileNameSize]);
int32_t StopInputFileRecording();
int32_t StartOutputFileRecording(const char fileName[kAdmMaxFileNameSize]);
int32_t StopOutputFileRecording();
int32_t SetTypingStatus(bool typingStatus);
int32_t SetTypingStatus(bool typing_status);
private:
void AllocatePlayoutBufferIfNeeded();
void AllocateRecordingBufferIfNeeded();
// Posts the first delayed task in the task queue and starts the periodic
// timer.
void StartTimer();
@@ -86,11 +93,15 @@ class AudioDeviceBuffer {
// creates this object.
rtc::ThreadChecker thread_checker_;
// Raw pointer to AudioTransport instance. Supplied to RegisterAudioCallback()
// and it must outlive this object.
AudioTransport* audio_transport_cb_;
// TODO(henrika): given usage of thread checker, it should be possible to
// remove all locks in this class.
rtc::CriticalSection _critSect;
rtc::CriticalSection _critSectCb;
AudioTransport* _ptrCbAudioTransport;
// Task queue used to invoke LogStats() periodically. Tasks are executed on a
// worker thread but it does not necessarily have to be the same thread for
// each task.
@@ -99,45 +110,50 @@ class AudioDeviceBuffer {
// Ensures that the timer is only started once.
bool timer_has_started_;
uint32_t _recSampleRate;
uint32_t _playSampleRate;
// Sample rate in Hertz.
uint32_t rec_sample_rate_;
uint32_t play_sample_rate_;
size_t _recChannels;
size_t _playChannels;
// Number of audio channels.
size_t rec_channels_;
size_t play_channels_;
// selected recording channel (left/right/both)
AudioDeviceModule::ChannelType _recChannel;
AudioDeviceModule::ChannelType rec_channel_;
// 2 or 4 depending on mono or stereo
size_t _recBytesPerSample;
size_t _playBytesPerSample;
// Number of bytes per audio sample (2 or 4).
size_t rec_bytes_per_sample_;
size_t play_bytes_per_sample_;
// 10ms in stereo @ 96kHz
int8_t _recBuffer[kMaxBufferSizeBytes];
// Number of audio samples/bytes per 10ms.
size_t rec_samples_per_10ms_;
size_t rec_bytes_per_10ms_;
size_t play_samples_per_10ms_;
size_t play_bytes_per_10ms_;
// one sample <=> 2 or 4 bytes
size_t _recSamples;
size_t _recSize; // in bytes
// Buffer used for recorded audio samples. Size is given by
// |rec_bytes_per_10ms_| and the buffer is allocated in InitRecording() on the
// main/creating thread.
std::unique_ptr<int8_t[]> rec_buffer_;
// 10ms in stereo @ 96kHz
int8_t _playBuffer[kMaxBufferSizeBytes];
// Buffer used for audio samples to be played out. Size is given by
// |play_bytes_per_10ms_| and the buffer is allocated in InitPlayout() on the
// main/creating thread.
std::unique_ptr<int8_t[]> play_buffer_;
// one sample <=> 2 or 4 bytes
size_t _playSamples;
size_t _playSize; // in bytes
// AGC parameters.
uint32_t current_mic_level_;
uint32_t new_mic_level_;
FileWrapper& _recFile;
FileWrapper& _playFile;
// Contains true if a key-press has been detected.
bool typing_status_;
uint32_t _currentMicLevel;
uint32_t _newMicLevel;
// Delay values used by the AEC.
int play_delay_ms_;
int rec_delay_ms_;
bool _typingStatus;
int _playDelayMS;
int _recDelayMS;
int _clockDrift;
int high_delay_counter_;
// Contains a clock-drift measurement.
int clock_drift_;
// Counts number of times LogStats() has been called.
size_t num_stat_reports_;
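Finally, the header diff above swaps the two fixed int8_t arrays of kMaxBufferSizeBytes for heap buffers that are sized lazily for exactly 10 ms of audio. A minimal sketch of that pattern, with illustrative names rather than the real member variables:

// Sketch of the lazy 10 ms buffer allocation; names are illustrative.
#include <cstdint>
#include <memory>

class TenMsBuffer {
 public:
  TenMsBuffer(uint32_t sample_rate_hz, size_t bytes_per_sample)
      : sample_rate_hz_(sample_rate_hz), bytes_per_sample_(bytes_per_sample) {}

  // Allocates on first use instead of reserving a worst-case
  // kMaxBufferSizeBytes array up front.
  void AllocateIfNeeded() {
    if (buffer_)
      return;
    samples_per_10ms_ = sample_rate_hz_ * 10 / 1000;
    bytes_per_10ms_ = bytes_per_sample_ * samples_per_10ms_;
    buffer_.reset(new int8_t[bytes_per_10ms_]);
  }

  int8_t* data() { return buffer_.get(); }
  size_t bytes_per_10ms() const { return bytes_per_10ms_; }

 private:
  const uint32_t sample_rate_hz_;
  const size_t bytes_per_sample_;
  size_t samples_per_10ms_ = 0;
  size_t bytes_per_10ms_ = 0;
  std::unique_ptr<int8_t[]> buffer_;
};

In the patch the allocation is triggered from the first audio callback (SetRecordedBuffer() and RequestPlayoutData()), so only the very first 10 ms chunk pays the allocation cost while later callbacks reuse the same buffer.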