Swap use of CriticalSectionWrapper with rtc::CriticalSection in voice_engine/

Also remove mischievous tab character!
This is part of getting rid of CriticalSectionWrapper and makes the code slightly simpler.
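The pattern applied across these files, sketched below with hypothetical OldStatsHolder/NewStatsHolder classes (not code from this CL): a heap-allocated CriticalSectionWrapper entered via CriticalSectionScoped becomes a plain rtc::CriticalSection member entered via rtc::CritScope, so the constructor initializer and any manual delete of the lock go away.

  #include "webrtc/base/criticalsection.h"                              // new style
  #include "webrtc/base/scoped_ptr.h"                                   // old style
  #include "webrtc/system_wrappers/include/critical_section_wrapper.h"  // old style

  // Old style (as in StatisticsProxy before this CL): lock allocated on the heap.
  class OldStatsHolder {
   public:
    OldStatsHolder()
        : lock_(webrtc::CriticalSectionWrapper::CreateCriticalSection()) {}
    int value() const {
      webrtc::CriticalSectionScoped cs(lock_.get());
      return value_;
    }
   private:
    rtc::scoped_ptr<webrtc::CriticalSectionWrapper> lock_;
    int value_ = 0;
  };

  // New style: the lock is an ordinary member, mutable so const methods can take it.
  class NewStatsHolder {
   public:
    int value() const {
      rtc::CritScope cs(&lock_);
      return value_;
    }
   private:
    mutable rtc::CriticalSection lock_;
    int value_ = 0;
  };

For members that previously held a reference obtained from CreateCriticalSection() (e.g. _fileCritSect in Channel), the matching delete of the lock in the destructor is removed as well.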

BUG=

Review URL: https://codereview.webrtc.org/1607353002

Cr-Commit-Position: refs/heads/master@{#11346}
tommi 2016-01-21 10:37:37 -08:00 committed by Commit bot
parent 8947a01e05
commit 31fc21f454
39 changed files with 232 additions and 293 deletions


@ -14,6 +14,7 @@
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread_checker.h"
@ -30,7 +31,6 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
@ -157,9 +157,7 @@ struct ChannelStatistics : public RtcpStatistics {
// Statistics callback, called at each generation of a new RTCP report block.
class StatisticsProxy : public RtcpStatisticsCallback {
public:
StatisticsProxy(uint32_t ssrc)
: stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
ssrc_(ssrc) {}
StatisticsProxy(uint32_t ssrc) : ssrc_(ssrc) {}
virtual ~StatisticsProxy() {}
void StatisticsUpdated(const RtcpStatistics& statistics,
@ -167,7 +165,7 @@ class StatisticsProxy : public RtcpStatisticsCallback {
if (ssrc != ssrc_)
return;
CriticalSectionScoped cs(stats_lock_.get());
rtc::CritScope cs(&stats_lock_);
stats_.rtcp = statistics;
if (statistics.jitter > stats_.max_jitter) {
stats_.max_jitter = statistics.jitter;
@ -177,7 +175,7 @@ class StatisticsProxy : public RtcpStatisticsCallback {
void CNameChanged(const char* cname, uint32_t ssrc) override {}
ChannelStatistics GetStats() {
CriticalSectionScoped cs(stats_lock_.get());
rtc::CritScope cs(&stats_lock_);
return stats_;
}
@ -185,7 +183,7 @@ class StatisticsProxy : public RtcpStatisticsCallback {
// StatisticsUpdated calls are triggered from threads in the RTP module,
// while GetStats calls can be triggered from the public voice engine API,
// hence synchronization is needed.
rtc::scoped_ptr<CriticalSectionWrapper> stats_lock_;
rtc::CriticalSection stats_lock_;
const uint32_t ssrc_;
ChannelStatistics stats_;
};
@ -298,7 +296,7 @@ Channel::InFrameType(FrameType frame_type)
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::InFrameType(frame_type=%d)", frame_type);
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
_sendFrameType = (frame_type == kAudioFrameSpeech);
return 0;
}
@ -306,7 +304,7 @@ Channel::InFrameType(FrameType frame_type)
int32_t
Channel::OnRxVadDetected(int vadDecision)
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_rxVadObserverPtr)
{
_rxVadObserverPtr->OnRxVad(_channelId, vadDecision);
@ -321,7 +319,7 @@ bool Channel::SendRtp(const uint8_t* data,
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_transportPtr == NULL)
{
@ -352,7 +350,7 @@ Channel::SendRtcp(const uint8_t *data, size_t len)
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SendRtcp(len=%" PRIuS ")", len);
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_transportPtr == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
@ -566,7 +564,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
// scaling/panning, as that applies to the mix operation.
// External recipients of the audio (e.g. via AudioTrack), will do their
// own mixing/dynamic processing.
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (audio_sink_) {
AudioSinkInterface::Data data(
&audioFrame->data_[0],
@ -580,7 +578,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
float left_pan = 1.0f;
float right_pan = 1.0f;
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
output_gain = _outputGain;
left_pan = _panLeft;
right_pan= _panRight;
@ -620,7 +618,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
// External media
if (_outputExternalMedia)
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
const bool isStereo = (audioFrame->num_channels_ == 2);
if (_outputExternalMediaCallbackPtr)
{
@ -633,7 +631,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
// Record playout if enabled
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFileRecording && _outputFileRecorderPtr)
{
@ -660,7 +658,7 @@ int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
(GetPlayoutFrequency() / 1000);
{
CriticalSectionScoped lock(ts_stats_lock_.get());
rtc::CritScope lock(&ts_stats_lock_);
// Compute ntp time.
audioFrame->ntp_time_ms_ = ntp_estimator_.Estimate(
audioFrame->timestamp_);
@ -704,7 +702,7 @@ Channel::NeededFrequency(int32_t id) const
// limit the spectrum anyway.
if (channel_state_.Get().output_file_playing)
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFilePlayerPtr)
{
if(_outputFilePlayerPtr->Frequency()>highestNeeded)
@ -790,7 +788,7 @@ Channel::RecordFileEnded(int32_t id)
assert(id == _outputFileRecorderId);
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
_outputFileRecording = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
@ -803,11 +801,7 @@ Channel::Channel(int32_t channelId,
uint32_t instanceId,
RtcEventLog* const event_log,
const Config& config)
: _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
volume_settings_critsect_(
*CriticalSectionWrapper::CreateCriticalSection()),
_instanceId(instanceId),
: _instanceId(instanceId),
_channelId(channelId),
event_log_(event_log),
rtp_header_parser_(RtpHeaderParser::Create()),
@ -848,7 +842,6 @@ Channel::Channel(int32_t channelId,
playout_delay_ms_(0),
_numberOfDiscardedPackets(0),
send_sequence_number_(0),
ts_stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
capture_start_rtp_time_stamp_(-1),
capture_start_ntp_time_ms_(-1),
@ -875,7 +868,6 @@ Channel::Channel(int32_t channelId,
_lastPayloadType(0),
_includeAudioLevelIndication(false),
_outputSpeechType(AudioFrame::kNormalSpeech),
video_sync_lock_(CriticalSectionWrapper::CreateCriticalSection()),
_average_jitter_buffer_delay_us(0),
_previousTimestamp(0),
_recPacketDelayMs(20),
@ -885,7 +877,6 @@ Channel::Channel(int32_t channelId,
restored_packet_in_use_(false),
rtcp_observer_(new VoERtcpObserver(this)),
network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock())),
assoc_send_channel_lock_(CriticalSectionWrapper::CreateCriticalSection()),
associate_send_channel_(ChannelOwner(nullptr)),
pacing_enabled_(config.Get<VoicePacing>().enabled),
feedback_observer_proxy_(pacing_enabled_ ? new TransportFeedbackProxy()
@ -953,7 +944,7 @@ Channel::~Channel()
StopPlayout();
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_inputFilePlayerPtr)
{
_inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
@ -999,11 +990,6 @@ Channel::~Channel()
_moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
// End of modules shutdown
// Delete other objects
delete &_callbackCritSect;
delete &_fileCritSect;
delete &volume_settings_critsect_;
}
int32_t
@ -1164,7 +1150,7 @@ Channel::SetEngineInformation(Statistics& engineStatistics,
ProcessThread& moduleProcessThread,
AudioDeviceModule& audioDeviceModule,
VoiceEngineObserver* voiceEngineObserver,
CriticalSectionWrapper* callbackCritSect)
rtc::CriticalSection* callbackCritSect)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetEngineInformation()");
@ -1187,7 +1173,7 @@ Channel::UpdateLocalTimeStamp()
}
void Channel::SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) {
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
audio_sink_ = std::move(sink);
}
@ -1267,7 +1253,7 @@ Channel::StartSend()
_engineStatisticsPtr->SetLastError(
VE_RTP_RTCP_MODULE_ERROR, kTraceError,
"StartSend() RTP/RTCP failed to start sending");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
channel_state_.SetSending(false);
return -1;
}
@ -1339,7 +1325,7 @@ Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::RegisterVoiceEngineObserver()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_voiceEngineObserverPtr)
{
@ -1357,7 +1343,7 @@ Channel::DeRegisterVoiceEngineObserver()
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::DeRegisterVoiceEngineObserver()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (!_voiceEngineObserverPtr)
{
@ -1664,7 +1650,7 @@ int32_t Channel::RegisterExternalTransport(Transport& transport)
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::RegisterExternalTransport()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_externalTransport)
{
@ -1684,7 +1670,7 @@ Channel::DeRegisterExternalTransport()
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::DeRegisterExternalTransport()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (!_transportPtr)
{
@ -1828,7 +1814,7 @@ int32_t Channel::ReceivedRTCPPacket(const int8_t* data, size_t length) {
}
{
CriticalSectionScoped lock(ts_stats_lock_.get());
rtc::CritScope lock(&ts_stats_lock_);
ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
}
return 0;
@ -1857,7 +1843,7 @@ int Channel::StartPlayingFileLocally(const char* fileName,
}
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFilePlayerPtr)
{
@ -1936,7 +1922,7 @@ int Channel::StartPlayingFileLocally(InStream* stream,
}
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
// Destroy the old instance
if (_outputFilePlayerPtr)
@ -1995,7 +1981,7 @@ int Channel::StopPlayingFileLocally()
}
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFilePlayerPtr->StopPlayingFile() != 0)
{
@ -2047,7 +2033,7 @@ int Channel::RegisterFilePlayingToMixer()
if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
{
channel_state_.SetOutputFilePlaying(false);
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
_engineStatisticsPtr->SetLastError(
VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
"StartPlayingFile() failed to add participant as file to mixer");
@ -2074,7 +2060,7 @@ int Channel::StartPlayingFileAsMicrophone(const char* fileName,
"stopPosition=%d)", fileName, loop, format, volumeScaling,
startPosition, stopPosition);
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (channel_state_.Get().input_file_playing)
{
@ -2149,7 +2135,7 @@ int Channel::StartPlayingFileAsMicrophone(InStream* stream,
return -1;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (channel_state_.Get().input_file_playing)
{
@ -2205,7 +2191,7 @@ int Channel::StopPlayingFileAsMicrophone()
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::StopPlayingFileAsMicrophone()");
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (!channel_state_.Get().input_file_playing)
{
@ -2273,7 +2259,7 @@ int Channel::StartRecordingPlayout(const char* fileName,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
@ -2350,7 +2336,7 @@ int Channel::StartRecordingPlayout(OutStream* stream,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
@ -2401,7 +2387,7 @@ int Channel::StopRecordingPlayout()
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFileRecorderPtr->StopRecording() != 0)
{
@ -2421,7 +2407,7 @@ int Channel::StopRecordingPlayout()
void
Channel::SetMixWithMicStatus(bool mix)
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
_mixFileWithMicrophone=mix;
}
@ -2444,7 +2430,7 @@ Channel::GetSpeechOutputLevelFullRange(uint32_t& level) const
int
Channel::SetMute(bool enable)
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetMute(enable=%d)", enable);
_mute = enable;
@ -2454,14 +2440,14 @@ Channel::SetMute(bool enable)
bool
Channel::Mute() const
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
return _mute;
}
int
Channel::SetOutputVolumePan(float left, float right)
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetOutputVolumePan()");
_panLeft = left;
@ -2472,7 +2458,7 @@ Channel::SetOutputVolumePan(float left, float right)
int
Channel::GetOutputVolumePan(float& left, float& right) const
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
left = _panLeft;
right = _panRight;
return 0;
@ -2481,7 +2467,7 @@ Channel::GetOutputVolumePan(float& left, float& right) const
int
Channel::SetChannelOutputVolumeScaling(float scaling)
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetChannelOutputVolumeScaling()");
_outputGain = scaling;
@ -2491,7 +2477,7 @@ Channel::SetChannelOutputVolumeScaling(float scaling)
int
Channel::GetChannelOutputVolumeScaling(float& scaling) const
{
CriticalSectionScoped cs(&volume_settings_critsect_);
rtc::CritScope cs(&volume_settings_critsect_);
scaling = _outputGain;
return 0;
}
@ -2601,7 +2587,7 @@ Channel::RegisterRxVadObserver(VoERxVadCallback &observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::RegisterRxVadObserver()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_rxVadObserverPtr)
{
@ -2620,7 +2606,7 @@ Channel::DeRegisterRxVadObserver()
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::DeRegisterRxVadObserver()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (!_rxVadObserverPtr)
{
@ -3260,7 +3246,7 @@ Channel::GetRTPStatistics(CallStatistics& stats)
// --- Timestamps
{
CriticalSectionScoped lock(ts_stats_lock_.get());
rtc::CritScope lock(&ts_stats_lock_);
stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
}
return 0;
@ -3401,7 +3387,7 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
if (channel_state_.Get().input_external_media)
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
const bool isStereo = (_audioFrame.num_channels_ == 2);
if (_inputExternalMediaCallbackPtr)
{
@ -3465,7 +3451,7 @@ Channel::EncodeAndSend()
}
void Channel::DisassociateSendChannel(int channel_id) {
CriticalSectionScoped lock(assoc_send_channel_lock_.get());
rtc::CritScope lock(&assoc_send_channel_lock_);
Channel* channel = associate_send_channel_.channel();
if (channel && channel->ChannelId() == channel_id) {
// If this channel is associated with a send channel of the specified
@ -3482,7 +3468,7 @@ int Channel::RegisterExternalMediaProcessing(
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::RegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (kPlaybackPerChannel == type)
{
@ -3518,7 +3504,7 @@ int Channel::DeRegisterExternalMediaProcessing(ProcessingTypes type)
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::DeRegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (kPlaybackPerChannel == type)
{
@ -3580,7 +3566,7 @@ void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
bool Channel::GetDelayEstimate(int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) const {
CriticalSectionScoped cs(video_sync_lock_.get());
rtc::CritScope lock(&video_sync_lock_);
if (_average_jitter_buffer_delay_us == 0) {
return false;
}
@ -3627,7 +3613,7 @@ Channel::SetMinimumPlayoutDelay(int delayMs)
int Channel::GetPlayoutTimestamp(unsigned int& timestamp) {
uint32_t playout_timestamp_rtp = 0;
{
CriticalSectionScoped cs(video_sync_lock_.get());
rtc::CritScope lock(&video_sync_lock_);
playout_timestamp_rtp = playout_timestamp_rtp_;
}
if (playout_timestamp_rtp == 0) {
@ -3681,7 +3667,7 @@ Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
size_t fileSamples(0);
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_inputFilePlayerPtr == NULL)
{
@ -3751,7 +3737,7 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
size_t fileSamples(0);
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFilePlayerPtr == NULL)
{
@ -3900,7 +3886,7 @@ void Channel::UpdatePlayoutTimestamp(bool rtcp) {
playout_timestamp);
{
CriticalSectionScoped cs(video_sync_lock_.get());
rtc::CritScope lock(&video_sync_lock_);
if (rtcp) {
playout_timestamp_rtcp_ = playout_timestamp;
} else {
@ -3941,7 +3927,7 @@ void Channel::UpdatePacketDelay(uint32_t rtp_timestamp,
if (timestamp_diff_ms == 0) return;
{
CriticalSectionScoped cs(video_sync_lock_.get());
rtc::CritScope lock(&video_sync_lock_);
if (packet_delay_ms >= 10 && packet_delay_ms <= 60) {
_recPacketDelayMs = packet_delay_ms;
@ -4085,7 +4071,7 @@ int64_t Channel::GetRTT(bool allow_associate_channel) const {
int64_t rtt = 0;
if (report_blocks.empty()) {
if (allow_associate_channel) {
CriticalSectionScoped lock(assoc_send_channel_lock_.get());
rtc::CritScope lock(&assoc_send_channel_lock_);
Channel* channel = associate_send_channel_.channel();
// Tries to get RTT from an associated channel. This is important for
// receive-only channels.


@ -47,7 +47,6 @@ namespace webrtc {
class AudioDeviceModule;
class Config;
class CriticalSectionWrapper;
class FileWrapper;
class PacketRouter;
class ProcessThread;
@ -103,57 +102,56 @@ class ChannelState {
bool receiving;
};
ChannelState() : lock_(CriticalSectionWrapper::CreateCriticalSection()) {
}
ChannelState() {}
virtual ~ChannelState() {}
void Reset() {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_ = State();
}
State Get() const {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
return state_;
}
void SetRxApmIsEnabled(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.rx_apm_is_enabled = enable;
}
void SetInputExternalMedia(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.input_external_media = enable;
}
void SetOutputFilePlaying(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.output_file_playing = enable;
}
void SetInputFilePlaying(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.input_file_playing = enable;
}
void SetPlaying(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.playing = enable;
}
void SetSending(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.sending = enable;
}
void SetReceiving(bool enable) {
CriticalSectionScoped lock(lock_.get());
rtc::CritScope lock(&lock_);
state_.receiving = enable;
}
private:
rtc::scoped_ptr<CriticalSectionWrapper> lock_;
mutable rtc::CriticalSection lock_;
State state_;
};
@ -190,7 +188,7 @@ public:
ProcessThread& moduleProcessThread,
AudioDeviceModule& audioDeviceModule,
VoiceEngineObserver* voiceEngineObserver,
CriticalSectionWrapper* callbackCritSect);
rtc::CriticalSection* callbackCritSect);
int32_t UpdateLocalTimeStamp();
void SetSink(rtc::scoped_ptr<AudioSinkInterface> sink);
@ -430,7 +428,7 @@ public:
}
bool ExternalTransport() const
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
return _externalTransport;
}
bool ExternalMixing() const
@ -460,7 +458,7 @@ public:
// Used for obtaining RTT for a receive-only channel.
void set_associate_send_channel(const ChannelOwner& channel) {
assert(_channelId != channel.channel()->ChannelId());
CriticalSectionScoped lock(assoc_send_channel_lock_.get());
rtc::CritScope lock(&assoc_send_channel_lock_);
associate_send_channel_ = channel;
}
@ -494,9 +492,9 @@ private:
int32_t GetPlayoutFrequency();
int64_t GetRTT(bool allow_associate_channel) const;
CriticalSectionWrapper& _fileCritSect;
CriticalSectionWrapper& _callbackCritSect;
CriticalSectionWrapper& volume_settings_critsect_;
mutable rtc::CriticalSection _fileCritSect;
mutable rtc::CriticalSection _callbackCritSect;
mutable rtc::CriticalSection volume_settings_critsect_;
uint32_t _instanceId;
int32_t _channelId;
@ -544,7 +542,7 @@ private:
uint16_t send_sequence_number_;
uint8_t restored_packet_[kVoiceEngineMaxIpPacketSizeBytes];
rtc::scoped_ptr<CriticalSectionWrapper> ts_stats_lock_;
mutable rtc::CriticalSection ts_stats_lock_;
rtc::scoped_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
// The rtp timestamp of the first played out audio frame.
@ -560,7 +558,7 @@ private:
ProcessThread* _moduleProcessThreadPtr;
AudioDeviceModule* _audioDeviceModulePtr;
VoiceEngineObserver* _voiceEngineObserverPtr; // owned by base
CriticalSectionWrapper* _callbackCritSectPtr; // owned by base
rtc::CriticalSection* _callbackCritSectPtr; // owned by base
Transport* _transportPtr; // WebRtc socket or external transport
RMSLevel rms_level_;
rtc::scoped_ptr<AudioProcessing> rx_audioproc_; // far end AudioProcessing
@ -585,7 +583,7 @@ private:
// VoENetwork
AudioFrame::SpeechType _outputSpeechType;
// VoEVideoSync
rtc::scoped_ptr<CriticalSectionWrapper> video_sync_lock_;
mutable rtc::CriticalSection video_sync_lock_;
uint32_t _average_jitter_buffer_delay_us GUARDED_BY(video_sync_lock_);
uint32_t _previousTimestamp;
uint16_t _recPacketDelayMs GUARDED_BY(video_sync_lock_);
@ -598,7 +596,7 @@ private:
rtc::scoped_ptr<VoERtcpObserver> rtcp_observer_;
rtc::scoped_ptr<NetworkPredictor> network_predictor_;
// An associated send channel.
rtc::scoped_ptr<CriticalSectionWrapper> assoc_send_channel_lock_;
mutable rtc::CriticalSection assoc_send_channel_lock_;
ChannelOwner associate_send_channel_ GUARDED_BY(assoc_send_channel_lock_);
bool pacing_enabled_;


@ -48,7 +48,6 @@ ChannelOwner::ChannelRef::ChannelRef(class Channel* channel)
ChannelManager::ChannelManager(uint32_t instance_id, const Config& config)
: instance_id_(instance_id),
last_channel_id_(-1),
lock_(CriticalSectionWrapper::CreateCriticalSection()),
config_(config),
event_log_(RtcEventLog::Create()) {}
@ -66,7 +65,7 @@ ChannelOwner ChannelManager::CreateChannelInternal(const Config& config) {
event_log_.get(), config);
ChannelOwner channel_owner(channel);
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
channels_.push_back(channel_owner);
@ -74,7 +73,7 @@ ChannelOwner ChannelManager::CreateChannelInternal(const Config& config) {
}
ChannelOwner ChannelManager::GetChannel(int32_t channel_id) {
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
for (size_t i = 0; i < channels_.size(); ++i) {
if (channels_[i].channel()->ChannelId() == channel_id)
@ -84,7 +83,7 @@ ChannelOwner ChannelManager::GetChannel(int32_t channel_id) {
}
void ChannelManager::GetAllChannels(std::vector<ChannelOwner>* channels) {
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
*channels = channels_;
}
@ -95,7 +94,7 @@ void ChannelManager::DestroyChannel(int32_t channel_id) {
// Channels while holding a lock, but rather when the method returns.
ChannelOwner reference(NULL);
{
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
std::vector<ChannelOwner>::iterator to_delete = channels_.end();
for (auto it = channels_.begin(); it != channels_.end(); ++it) {
Channel* channel = it->channel();
@ -119,14 +118,14 @@ void ChannelManager::DestroyAllChannels() {
// lock, but rather when the method returns.
std::vector<ChannelOwner> references;
{
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
references = channels_;
channels_.clear();
}
}
size_t ChannelManager::NumOfChannels() const {
CriticalSectionScoped crit(lock_.get());
rtc::CritScope crit(&lock_);
return channels_.size();
}


@ -14,10 +14,10 @@
#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/system_wrappers/include/atomic32.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@ -123,7 +123,7 @@ class ChannelManager {
Atomic32 last_channel_id_;
rtc::scoped_ptr<CriticalSectionWrapper> lock_;
mutable rtc::CriticalSection lock_;
std::vector<ChannelOwner> channels_;
const Config& config_;


@ -12,7 +12,6 @@
#include <assert.h>
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@ -66,7 +65,6 @@ const int16_t Dtmf_dBm0kHz[37]=
DtmfInband::DtmfInband(int32_t id) :
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_id(id),
_outputFrequencyHz(8000),
_frameLengthSamples(0),
@ -84,7 +82,6 @@ DtmfInband::DtmfInband(int32_t id) :
DtmfInband::~DtmfInband()
{
delete &_critSect;
}
int
@ -109,7 +106,7 @@ DtmfInband::GetSampleRate(uint16_t& frequency)
return 0;
}
void
void
DtmfInband::Init()
{
_remainingSamples = 0;
@ -130,7 +127,7 @@ DtmfInband::AddTone(uint8_t eventCode,
int32_t lengthMs,
int32_t attenuationDb)
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
if (attenuationDb > 36 || eventCode > 15)
{
@ -159,7 +156,7 @@ DtmfInband::AddTone(uint8_t eventCode,
int
DtmfInband::ResetTone()
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
ReInit();
@ -174,7 +171,7 @@ int
DtmfInband::StartTone(uint8_t eventCode,
int32_t attenuationDb)
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
if (attenuationDb > 36 || eventCode > 15)
{
@ -200,7 +197,7 @@ DtmfInband::StartTone(uint8_t eventCode,
int
DtmfInband::StopTone()
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
if (!_playing)
{
@ -213,16 +210,16 @@ DtmfInband::StopTone()
}
// Shall be called between tones
void
void
DtmfInband::ReInit()
{
_reinit = true;
}
bool
bool
DtmfInband::IsAddingTone()
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
return (_remainingSamples > 0 || _playing);
}
@ -230,7 +227,7 @@ int
DtmfInband::Get10msTone(int16_t output[320],
uint16_t& outputSizeInSamples)
{
CriticalSectionScoped lock(&_critSect);
rtc::CritScope lock(&_critSect);
if (DtmfFix_generate(output,
_eventCode,
_attenuationDb,
@ -248,6 +245,7 @@ DtmfInband::Get10msTone(int16_t output[320],
void
DtmfInband::UpdateDelaySinceLastTone()
{
rtc::CritScope lock(&_critSect);
_delaySinceLastToneMS += kDtmfFrameSizeMs;
// avoid wraparound
if (_delaySinceLastToneMS > (1<<30))
@ -259,6 +257,7 @@ DtmfInband::UpdateDelaySinceLastTone()
uint32_t
DtmfInband::DelaySinceLastTone() const
{
rtc::CritScope lock(&_critSect);
return _delaySinceLastToneMS;
}


@ -13,9 +13,9 @@
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
#include "webrtc/base/criticalsection.h"
namespace webrtc {
class CriticalSectionWrapper;
class DtmfInband
{
@ -67,7 +67,7 @@ private:
int16_t length);
private:
CriticalSectionWrapper& _critSect;
mutable rtc::CriticalSection _critSect;
int32_t _id;
uint16_t _outputFrequencyHz; // {8000, 16000, 32000}
int16_t _oldOutputLow[2]; // Data needed for oscillator model


@ -15,7 +15,6 @@ namespace webrtc {
DtmfInbandQueue::DtmfInbandQueue(int32_t id):
_id(id),
_DtmfCritsect(*CriticalSectionWrapper::CreateCriticalSection()),
_nextEmptyIndex(0)
{
memset(_DtmfKey,0, sizeof(_DtmfKey));
@ -25,13 +24,12 @@ DtmfInbandQueue::DtmfInbandQueue(int32_t id):
DtmfInbandQueue::~DtmfInbandQueue()
{
delete &_DtmfCritsect;
}
int
DtmfInbandQueue::AddDtmf(uint8_t key, uint16_t len, uint8_t level)
{
CriticalSectionScoped lock(&_DtmfCritsect);
rtc::CritScope lock(&_DtmfCritsect);
if (_nextEmptyIndex >= kDtmfInbandMax)
{
@ -50,7 +48,7 @@ DtmfInbandQueue::AddDtmf(uint8_t key, uint16_t len, uint8_t level)
int8_t
DtmfInbandQueue::NextDtmf(uint16_t* len, uint8_t* level)
{
CriticalSectionScoped lock(&_DtmfCritsect);
rtc::CritScope lock(&_DtmfCritsect);
if(!PendingDtmf())
{
@ -74,14 +72,14 @@ DtmfInbandQueue::NextDtmf(uint16_t* len, uint8_t* level)
bool
DtmfInbandQueue::PendingDtmf()
{
CriticalSectionScoped lock(&_DtmfCritsect);
rtc::CritScope lock(&_DtmfCritsect);
return _nextEmptyIndex > 0;
}
void
DtmfInbandQueue::ResetDtmf()
{
CriticalSectionScoped lock(&_DtmfCritsect);
rtc::CritScope lock(&_DtmfCritsect);
_nextEmptyIndex = 0;
}


@ -11,7 +11,7 @@
#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
@ -38,7 +38,7 @@ private:
enum {kDtmfInbandMax = 20};
int32_t _id;
CriticalSectionWrapper& _DtmfCritsect;
rtc::CriticalSection _DtmfCritsect;
uint8_t _nextEmptyIndex;
uint8_t _DtmfKey[kDtmfInbandMax];
uint16_t _DtmfLen[kDtmfInbandMax];


@ -10,7 +10,6 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/voice_engine/level_indicator.h"
namespace webrtc {
@ -25,7 +24,6 @@ const int8_t permutation[33] =
AudioLevel::AudioLevel() :
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_absMax(0),
_count(0),
_currentLevel(0),
@ -33,12 +31,11 @@ AudioLevel::AudioLevel() :
}
AudioLevel::~AudioLevel() {
delete &_critSect;
}
void AudioLevel::Clear()
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
_absMax = 0;
_count = 0;
_currentLevel = 0;
@ -56,7 +53,7 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
// Protect member access using a lock since this method is called on a
// dedicated audio thread in the RecordedDataIsAvailable() callback.
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (absValue > _absMax)
_absMax = absValue;
@ -88,13 +85,13 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
int8_t AudioLevel::Level() const
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
return _currentLevel;
}
int16_t AudioLevel::LevelFullRange() const
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
return _currentLevelFullRange;
}


@ -11,13 +11,13 @@
#ifndef WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
#define WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
#include "webrtc/base/criticalsection.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
namespace webrtc {
class AudioFrame;
class CriticalSectionWrapper;
namespace voe {
class AudioLevel
@ -40,7 +40,7 @@ public:
private:
enum { kUpdateFrequency = 10};
CriticalSectionWrapper& _critSect;
mutable rtc::CriticalSection _critSect;
int16_t _absMax;
int16_t _count;


@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/voice_engine/monitor_module.h"
@ -18,20 +17,18 @@ namespace voe {
MonitorModule::MonitorModule() :
_observerPtr(NULL),
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_lastProcessTime(TickTime::MillisecondTimestamp())
{
}
MonitorModule::~MonitorModule()
{
delete &_callbackCritSect;
}
int32_t
MonitorModule::RegisterObserver(MonitorObserver& observer)
{
CriticalSectionScoped lock(&_callbackCritSect);
rtc::CritScope lock(&_callbackCritSect);
if (_observerPtr)
{
return -1;
@ -43,7 +40,7 @@ MonitorModule::RegisterObserver(MonitorObserver& observer)
int32_t
MonitorModule::DeRegisterObserver()
{
CriticalSectionScoped lock(&_callbackCritSect);
rtc::CritScope lock(&_callbackCritSect);
if (!_observerPtr)
{
return 0;
@ -64,9 +61,9 @@ int32_t
MonitorModule::Process()
{
_lastProcessTime = TickTime::MillisecondTimestamp();
rtc::CritScope lock(&_callbackCritSect);
if (_observerPtr)
{
CriticalSectionScoped lock(&_callbackCritSect);
_observerPtr->OnPeriodicProcess();
}
return 0;

View File

@ -11,6 +11,8 @@
#ifndef WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
#define WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/include/module.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
@ -25,8 +27,6 @@ protected:
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class MonitorModule : public Module
@ -45,8 +45,8 @@ public: // module
int32_t Process() override;
private:
MonitorObserver* _observerPtr;
CriticalSectionWrapper& _callbackCritSect;
rtc::CriticalSection _callbackCritSect;
MonitorObserver* _observerPtr GUARDED_BY(_callbackCritSect);
int64_t _lastProcessTime;
};


@ -13,7 +13,6 @@
#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
@ -68,7 +67,7 @@ void OutputMixer::RecordFileEnded(int32_t id)
"OutputMixer::RecordFileEnded(id=%d)", id);
assert(id == _instanceId);
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
_outputFileRecording = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RecordFileEnded() =>"
@ -92,8 +91,6 @@ OutputMixer::Create(OutputMixer*& mixer, uint32_t instanceId)
}
OutputMixer::OutputMixer(uint32_t instanceId) :
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_mixerModule(*AudioConferenceMixer::Create(instanceId)),
_audioLevel(),
_dtmfGenerator(instanceId),
@ -138,7 +135,7 @@ OutputMixer::~OutputMixer()
DeRegisterExternalMediaProcessing();
}
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFileRecorderPtr)
{
_outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
@ -149,8 +146,6 @@ OutputMixer::~OutputMixer()
}
_mixerModule.UnRegisterMixedStreamCallback();
delete &_mixerModule;
delete &_callbackCritSect;
delete &_fileCritSect;
}
int32_t
@ -178,7 +173,7 @@ int OutputMixer::RegisterExternalMediaProcessing(
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::RegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
_externalMediaCallbackPtr = &proccess_object;
_externalMedia = true;
@ -190,7 +185,7 @@ int OutputMixer::DeRegisterExternalMediaProcessing()
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
"OutputMixer::DeRegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
_externalMedia = false;
_externalMediaCallbackPtr = NULL;
@ -314,7 +309,7 @@ int OutputMixer::StartRecordingPlayout(const char* fileName,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
@ -394,7 +389,7 @@ int OutputMixer::StartRecordingPlayout(OutStream* stream,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
// Destroy the old instance
if (_outputFileRecorderPtr)
@ -445,7 +440,7 @@ int OutputMixer::StopRecordingPlayout()
return -1;
}
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFileRecorderPtr->StopRecording() != 0)
{
@ -472,7 +467,7 @@ int OutputMixer::GetMixedAudio(int sample_rate_hz,
// --- Record playout if enabled
{
CriticalSectionScoped cs(&_fileCritSect);
rtc::CritScope cs(&_fileCritSect);
if (_outputFileRecording && _outputFileRecorderPtr)
_outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
}
@ -536,7 +531,7 @@ OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
// --- External media processing
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_externalMedia)
{
const bool is_stereo = (_audioFrame.num_channels_ == 2);


@ -11,6 +11,7 @@
#ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
#define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
#include "webrtc/base/criticalsection.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h"
@ -23,7 +24,6 @@
namespace webrtc {
class AudioProcessing;
class CriticalSectionWrapper;
class FileWrapper;
class VoEMediaProcess;
@ -108,10 +108,9 @@ private:
Statistics* _engineStatisticsPtr;
AudioProcessing* _audioProcessingModulePtr;
// owns
CriticalSectionWrapper& _callbackCritSect;
rtc::CriticalSection _callbackCritSect;
// protect the _outputFileRecorderPtr and _outputFileRecording
CriticalSectionWrapper& _fileCritSect;
rtc::CriticalSection _fileCritSect;
AudioConferenceMixer& _mixerModule;
AudioFrame _audioFrame;
// Converts mixed audio to the audio device output rate.


@ -11,7 +11,6 @@
#include "webrtc/voice_engine/shared_data.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/output_mixer.h"
@ -25,7 +24,6 @@ static int32_t _gInstanceCounter = 0;
SharedData::SharedData(const Config& config)
: _instanceId(++_gInstanceCounter),
_apiCritPtr(CriticalSectionWrapper::CreateCriticalSection()),
_channelManager(_gInstanceCounter, config),
_engineStatistics(_gInstanceCounter),
_audioDevicePtr(NULL),
@ -51,7 +49,6 @@ SharedData::~SharedData()
if (_audioDevicePtr) {
_audioDevicePtr->Release();
}
delete _apiCritPtr;
_moduleProcessThreadPtr->Stop();
Trace::ReturnTrace();
}


@ -11,6 +11,7 @@
#ifndef WEBRTC_VOICE_ENGINE_SHARED_DATA_H
#define WEBRTC_VOICE_ENGINE_SHARED_DATA_H
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
@ -23,7 +24,6 @@ class ProcessThread;
namespace webrtc {
class Config;
class CriticalSectionWrapper;
namespace voe {
@ -43,7 +43,7 @@ public:
void set_audio_processing(AudioProcessing* audio_processing);
TransmitMixer* transmit_mixer() { return _transmitMixerPtr; }
OutputMixer* output_mixer() { return _outputMixerPtr; }
CriticalSectionWrapper* crit_sec() { return _apiCritPtr; }
rtc::CriticalSection* crit_sec() { return &_apiCritPtr; }
ProcessThread* process_thread() { return _moduleProcessThreadPtr.get(); }
AudioDeviceModule::AudioLayer audio_device_layer() const {
return _audioDeviceLayer;
@ -63,7 +63,7 @@ public:
protected:
const uint32_t _instanceId;
CriticalSectionWrapper* _apiCritPtr;
mutable rtc::CriticalSection _apiCritPtr;
ChannelManager _channelManager;
Statistics _engineStatistics;
AudioDeviceModule* _audioDevicePtr;


@ -13,7 +13,6 @@
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@ -21,20 +20,14 @@ namespace webrtc {
namespace voe {
Statistics::Statistics(uint32_t instanceId) :
_critPtr(CriticalSectionWrapper::CreateCriticalSection()),
_instanceId(instanceId),
_lastError(0),
_isInitialized(false)
{
}
Statistics::~Statistics()
{
if (_critPtr)
{
delete _critPtr;
_critPtr = NULL;
}
}
int32_t Statistics::SetInitialized()
@ -56,7 +49,7 @@ bool Statistics::Initialized() const
int32_t Statistics::SetLastError(int32_t error) const
{
CriticalSectionScoped cs(_critPtr);
rtc::CritScope cs(&lock_);
_lastError = error;
return 0;
}
@ -64,11 +57,11 @@ int32_t Statistics::SetLastError(int32_t error) const
int32_t Statistics::SetLastError(int32_t error,
TraceLevel level) const
{
CriticalSectionScoped cs(_critPtr);
_lastError = error;
WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1),
"error code is set to %d",
_lastError);
error);
rtc::CritScope cs(&lock_);
_lastError = error;
return 0;
}
@ -76,22 +69,28 @@ int32_t Statistics::SetLastError(
int32_t error,
TraceLevel level, const char* msg) const
{
CriticalSectionScoped cs(_critPtr);
char traceMessage[KTraceMaxMessageSize];
assert(strlen(msg) < KTraceMaxMessageSize);
_lastError = error;
sprintf(traceMessage, "%s (error=%d)", msg, error);
WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1), "%s",
traceMessage);
rtc::CritScope cs(&lock_);
_lastError = error;
return 0;
}
int32_t Statistics::LastError() const
{
CriticalSectionScoped cs(_critPtr);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"LastError() => %d", _lastError);
return _lastError;
int32_t ret;
{
rtc::CritScope cs(&lock_);
ret = _lastError;
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"LastError() => %d", ret);
return ret;
}
} // namespace voe


@ -11,14 +11,13 @@
#ifndef WEBRTC_VOICE_ENGINE_STATISTICS_H
#define WEBRTC_VOICE_ENGINE_STATISTICS_H
#include "webrtc/base/criticalsection.h"
#include "webrtc/common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
namespace webrtc {
class CriticalSectionWrapper;
namespace voe {
class Statistics
@ -40,7 +39,7 @@ class Statistics
int32_t LastError() const;
private:
CriticalSectionWrapper* _critPtr;
mutable rtc::CriticalSection lock_;
const uint32_t _instanceId;
mutable int32_t _lastError;
bool _isInitialized;


@ -37,9 +37,7 @@ namespace {
namespace voetest {
ConferenceTransport::ConferenceTransport()
: pq_crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()),
stream_crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()),
packet_event_(webrtc::EventWrapper::Create()),
: packet_event_(webrtc::EventWrapper::Create()),
thread_(Run, this, "ConferenceTransport"),
rtt_ms_(0),
stream_count_(0),
@ -120,7 +118,7 @@ bool ConferenceTransport::SendRtcp(const uint8_t* data, size_t len) {
int ConferenceTransport::GetReceiverChannelForSsrc(unsigned int sender_ssrc)
const {
webrtc::CriticalSectionScoped lock(stream_crit_.get());
rtc::CritScope lock(&stream_crit_);
auto it = streams_.find(sender_ssrc);
if (it != streams_.end()) {
return it->second.second;
@ -132,7 +130,7 @@ void ConferenceTransport::StorePacket(Packet::Type type,
const void* data,
size_t len) {
{
webrtc::CriticalSectionScoped lock(pq_crit_.get());
rtc::CritScope lock(&pq_crit_);
packet_queue_.push_back(Packet(type, data, len, rtc::Time()));
}
packet_event_->Set();
@ -198,7 +196,7 @@ bool ConferenceTransport::DispatchPackets() {
while (true) {
Packet packet;
{
webrtc::CriticalSectionScoped lock(pq_crit_.get());
rtc::CritScope lock(&pq_crit_);
if (packet_queue_.empty())
break;
packet = packet_queue_.front();
@ -245,14 +243,14 @@ unsigned int ConferenceTransport::AddStream(std::string file_name,
EXPECT_EQ(0, local_rtp_rtcp_->SetLocalSSRC(new_receiver, kLocalSsrc));
{
webrtc::CriticalSectionScoped lock(stream_crit_.get());
rtc::CritScope lock(&stream_crit_);
streams_[remote_ssrc] = std::make_pair(new_sender, new_receiver);
}
return remote_ssrc; // remote ssrc used as stream id.
}
bool ConferenceTransport::RemoveStream(unsigned int id) {
webrtc::CriticalSectionScoped lock(stream_crit_.get());
rtc::CritScope lock(&stream_crit_);
auto it = streams_.find(id);
if (it == streams_.end()) {
return false;


@ -17,11 +17,11 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/basictypes.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/platform_thread.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_codec.h"
@ -128,17 +128,16 @@ class ConferenceTransport: public webrtc::Transport {
void SendPacket(const Packet& packet);
bool DispatchPackets();
const rtc::scoped_ptr<webrtc::CriticalSectionWrapper> pq_crit_;
const rtc::scoped_ptr<webrtc::CriticalSectionWrapper> stream_crit_;
mutable rtc::CriticalSection pq_crit_;
mutable rtc::CriticalSection stream_crit_;
const rtc::scoped_ptr<webrtc::EventWrapper> packet_event_;
rtc::PlatformThread thread_;
unsigned int rtt_ms_;
unsigned int stream_count_;
std::map<unsigned int, std::pair<int, int>> streams_
GUARDED_BY(stream_crit_.get());
std::deque<Packet> packet_queue_ GUARDED_BY(pq_crit_.get());
std::map<unsigned int, std::pair<int, int>> streams_ GUARDED_BY(stream_crit_);
std::deque<Packet> packet_queue_ GUARDED_BY(pq_crit_);
int local_sender_; // Channel Id of local sender
int reflector_;


@ -13,12 +13,12 @@
#include <deque>
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/platform_thread.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/system_wrappers/include/atomic32.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/voice_engine/test/auto_test/fixtures/before_initialization_fixture.h"
@ -28,8 +28,7 @@ class TestErrorObserver;
class LoopBackTransport : public webrtc::Transport {
public:
LoopBackTransport(webrtc::VoENetwork* voe_network, int channel)
: crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()),
packet_event_(webrtc::EventWrapper::Create()),
: packet_event_(webrtc::EventWrapper::Create()),
thread_(NetworkProcess, this, "LoopBackTransport"),
channel_(channel),
voe_network_(voe_network),
@ -62,7 +61,7 @@ class LoopBackTransport : public webrtc::Transport {
}
void AddChannel(uint32_t ssrc, int channel) {
webrtc::CriticalSectionScoped lock(crit_.get());
rtc::CritScope lock(&crit_);
channels_[ssrc] = channel;
}
@ -85,7 +84,7 @@ class LoopBackTransport : public webrtc::Transport {
const void* data,
size_t len) {
{
webrtc::CriticalSectionScoped lock(crit_.get());
rtc::CritScope lock(&crit_);
packet_queue_.push_back(Packet(type, data, len));
}
packet_event_->Set();
@ -110,7 +109,7 @@ class LoopBackTransport : public webrtc::Transport {
Packet p;
int channel = channel_;
{
webrtc::CriticalSectionScoped lock(crit_.get());
rtc::CritScope lock(&crit_);
if (packet_queue_.empty())
break;
p = packet_queue_.front();
@ -143,12 +142,12 @@ class LoopBackTransport : public webrtc::Transport {
return true;
}
const rtc::scoped_ptr<webrtc::CriticalSectionWrapper> crit_;
mutable rtc::CriticalSection crit_;
const rtc::scoped_ptr<webrtc::EventWrapper> packet_event_;
rtc::PlatformThread thread_;
std::deque<Packet> packet_queue_ GUARDED_BY(crit_.get());
std::deque<Packet> packet_queue_ GUARDED_BY(crit_);
const int channel_;
std::map<uint32_t, int> channels_ GUARDED_BY(crit_.get());
std::map<uint32_t, int> channels_ GUARDED_BY(crit_);
webrtc::VoENetwork* const voe_network_;
webrtc::Atomic32 transmitted_packets_;
};


@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/criticalsection.h"
#include "webrtc/system_wrappers/include/atomic32.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
@ -17,9 +17,7 @@
class TestRtpObserver : public webrtc::VoERTPObserver {
public:
TestRtpObserver()
: crit_(voetest::CriticalSectionWrapper::CreateCriticalSection()),
changed_ssrc_event_(voetest::EventWrapper::Create()) {}
TestRtpObserver() : changed_ssrc_event_(voetest::EventWrapper::Create()) {}
virtual ~TestRtpObserver() {}
virtual void OnIncomingCSRCChanged(int channel,
unsigned int CSRC,
@ -31,11 +29,11 @@ class TestRtpObserver : public webrtc::VoERTPObserver {
EXPECT_EQ(voetest::kEventSignaled, changed_ssrc_event_->Wait(10*1000));
}
void SetIncomingSsrc(unsigned int ssrc) {
voetest::CriticalSectionScoped lock(crit_.get());
rtc::CritScope lock(&crit_);
incoming_ssrc_ = ssrc;
}
public:
rtc::scoped_ptr<voetest::CriticalSectionWrapper> crit_;
rtc::CriticalSection crit_;
unsigned int incoming_ssrc_;
rtc::scoped_ptr<voetest::EventWrapper> changed_ssrc_event_;
};
@ -48,7 +46,7 @@ void TestRtpObserver::OnIncomingSSRCChanged(int channel,
TEST_LOG("%s", msg);
{
voetest::CriticalSectionScoped lock(crit_.get());
rtc::CritScope lock(&crit_);
if (incoming_ssrc_ == SSRC)
changed_ssrc_event_->Set();
}


@ -43,7 +43,6 @@
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
namespace webrtc {
class CriticalSectionWrapper;
class VoENetEqStats;
}
#endif


@ -13,7 +13,6 @@
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
@ -37,7 +36,7 @@ TransmitMixer::OnPeriodicProcess()
bool send_typing_noise_warning = false;
bool typing_noise_detected = false;
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_typingNoiseWarningPending) {
send_typing_noise_warning = true;
typing_noise_detected = _typingNoiseDetected;
@ -45,7 +44,7 @@ TransmitMixer::OnPeriodicProcess()
}
}
if (send_typing_noise_warning) {
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_voiceEngineObserverPtr) {
if (typing_noise_detected) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
@ -71,7 +70,7 @@ TransmitMixer::OnPeriodicProcess()
// Modify |_saturationWarning| under lock to avoid conflict with write op
// in ProcessAudio and also ensure that we don't hold the lock during the
// callback.
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
saturationWarning = _saturationWarning;
if (_saturationWarning)
_saturationWarning = false;
@ -79,7 +78,7 @@ TransmitMixer::OnPeriodicProcess()
if (saturationWarning)
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_voiceEngineObserverPtr)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
@ -118,7 +117,7 @@ void TransmitMixer::PlayFileEnded(int32_t id)
assert(id == _filePlayerId);
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
_filePlaying = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
@ -134,14 +133,14 @@ TransmitMixer::RecordFileEnded(int32_t id)
if (id == _fileRecorderId)
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
_fileRecording = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::RecordFileEnded() => fileRecorder module"
"is shutdown");
} else if (id == _fileCallRecorderId)
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
_fileCallRecording = false;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::RecordFileEnded() => fileCallRecorder"
@ -193,8 +192,6 @@ TransmitMixer::TransmitMixer(uint32_t instanceId) :
_fileRecording(false),
_fileCallRecording(false),
_audioLevel(),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
_typingNoiseWarningPending(false),
_typingNoiseDetected(false),
@ -226,7 +223,7 @@ TransmitMixer::~TransmitMixer()
DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_fileRecorderPtr)
{
_fileRecorderPtr->RegisterModuleFileCallback(NULL);
@ -249,8 +246,6 @@ TransmitMixer::~TransmitMixer()
_filePlayerPtr = NULL;
}
}
delete &_critSect;
delete &_callbackCritSect;
}
int32_t
@ -276,7 +271,7 @@ TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::RegisterVoiceEngineObserver()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (_voiceEngineObserverPtr)
{
@ -340,7 +335,7 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
samplesPerSec);
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (external_preproc_ptr_) {
external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
_audioFrame.data_,
@ -388,7 +383,7 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
// --- Record to file
bool file_recording = false;
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
file_recording = _fileRecording;
}
if (file_recording)
@ -397,7 +392,7 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
}
{
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (external_postproc_ptr_) {
external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
_audioFrame.data_,
@ -520,7 +515,7 @@ int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
return 0;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// Destroy the old instance
if (_filePlayerPtr)
@ -597,7 +592,7 @@ int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
return 0;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// Destroy the old instance
if (_filePlayerPtr)
@ -654,7 +649,7 @@ int TransmitMixer::StopPlayingFileAsMicrophone()
return 0;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_filePlayerPtr->StopPlayingFile() != 0)
{
@ -686,7 +681,7 @@ int TransmitMixer::StartRecordingMicrophone(const char* fileName,
"TransmitMixer::StartRecordingMicrophone(fileName=%s)",
fileName);
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_fileRecording)
{
@ -764,7 +759,7 @@ int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::StartRecordingMicrophone()");
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_fileRecording)
{
@ -841,7 +836,7 @@ int TransmitMixer::StopRecordingMicrophone()
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::StopRecordingMicrophone()");
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (!_fileRecording)
{
@ -903,7 +898,7 @@ int TransmitMixer::StartRecordingCall(const char* fileName,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// Destroy the old instance
if (_fileCallRecorderPtr)
@ -981,7 +976,7 @@ int TransmitMixer::StartRecordingCall(OutStream* stream,
format = kFileFormatCompressedFile;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// Destroy the old instance
if (_fileCallRecorderPtr)
@ -1032,7 +1027,7 @@ int TransmitMixer::StopRecordingCall()
return -1;
}
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_fileCallRecorderPtr->StopRecording() != 0)
{
@ -1062,7 +1057,7 @@ int TransmitMixer::RegisterExternalMediaProcessing(
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::RegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (!object) {
return -1;
}
@ -1082,7 +1077,7 @@ int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
"TransmitMixer::DeRegisterExternalMediaProcessing()");
CriticalSectionScoped cs(&_callbackCritSect);
rtc::CritScope cs(&_callbackCritSect);
if (type == kRecordingAllChannelsMixed) {
external_postproc_ptr_ = NULL;
} else if (type == kRecordingPreprocessing) {
@ -1127,7 +1122,7 @@ bool TransmitMixer::IsRecordingCall()
bool TransmitMixer::IsRecordingMic()
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
return _fileRecording;
}
@ -1162,7 +1157,7 @@ void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
int32_t TransmitMixer::RecordAudioToFile(
uint32_t mixingFrequency)
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_fileRecorderPtr == NULL)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
@ -1189,7 +1184,7 @@ int32_t TransmitMixer::MixOrReplaceAudioWithFile(
size_t fileSamples(0);
{
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
if (_filePlayerPtr == NULL)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice,
@ -1267,7 +1262,7 @@ void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
// Store new capture level. Only updated when analog AGC is enabled.
_captureLevel = agc->stream_analog_level();
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// Triggers a callback in OnPeriodicProcess().
_saturationWarning |= agc->stream_is_saturated();
}
@ -1282,11 +1277,11 @@ void TransmitMixer::TypingDetection(bool keyPressed)
bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
if (_typingDetection.Process(keyPressed, vadActive)) {
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
_typingNoiseWarningPending = true;
_typingNoiseDetected = true;
} else {
CriticalSectionScoped cs(&_critSect);
rtc::CritScope cs(&_critSect);
// If there is already a warning pending, do not change the state.
// Otherwise set a warning pending if last callback was for noise detected.
if (!_typingNoiseWarningPending && _typingNoiseDetected) {
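The transmit_mixer.cc hunks above all apply the same mechanical change: the lock becomes an ordinary rtc::CriticalSection member and each CriticalSectionScoped is replaced by a stack-allocated rtc::CritScope that holds the lock until the end of the enclosing scope. A minimal sketch of that post-swap style, using a hypothetical FileRecordingState class rather than TransmitMixer itself:

#include "webrtc/base/criticalsection.h"

// Hypothetical class, for illustration only; mirrors the locking style the
// hunks above converge on.
class FileRecordingState {
 public:
  void SetRecording(bool recording) {
    rtc::CritScope cs(&crit_);  // locked until the end of this scope (RAII)
    recording_ = recording;
  }
  bool IsRecording() {
    rtc::CritScope cs(&crit_);
    return recording_;
  }

 private:
  rtc::CriticalSection crit_;  // plain member; no CreateCriticalSection()
  bool recording_ = false;
};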

@ -11,6 +11,7 @@
#ifndef WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
#define WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/common_types.h"
@ -210,8 +211,8 @@ private:
bool _fileCallRecording;
voe::AudioLevel _audioLevel;
// protect file instances and their variables in MixedParticipants()
CriticalSectionWrapper& _critSect;
CriticalSectionWrapper& _callbackCritSect;
rtc::CriticalSection _critSect;
rtc::CriticalSection _callbackCritSect;
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
webrtc::TypingDetection _typingDetection;

@ -12,7 +12,6 @@
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -17,7 +17,6 @@
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
@ -39,16 +38,14 @@ VoEBase* VoEBase::GetInterface(VoiceEngine* voiceEngine) {
VoEBaseImpl::VoEBaseImpl(voe::SharedData* shared)
: voiceEngineObserverPtr_(nullptr),
callbackCritSect_(*CriticalSectionWrapper::CreateCriticalSection()),
shared_(shared) {}
VoEBaseImpl::~VoEBaseImpl() {
TerminateInternal();
delete &callbackCritSect_;
}
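The removed lines in this constructor/destructor pair illustrate the lifetime simplification the swap buys: rtc::CriticalSection is an ordinary value member, so there is nothing to allocate in the initializer list and no matching delete in the destructor. A short sketch with a hypothetical Reporter class (not WebRTC code):

#include "webrtc/base/criticalsection.h"

// Hypothetical class, for illustration only.
class Reporter {
 public:
  // With CriticalSectionWrapper this would have needed
  //   lock_(*CriticalSectionWrapper::CreateCriticalSection())
  // in the initializer list and "delete &lock_;" in the destructor.
  Reporter() = default;   // nothing to allocate
  ~Reporter() = default;  // nothing to free

 private:
  rtc::CriticalSection lock_;  // constructed and destroyed automatically
};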
void VoEBaseImpl::OnErrorIsReported(const ErrorCode error) {
CriticalSectionScoped cs(&callbackCritSect_);
rtc::CritScope cs(&callbackCritSect_);
int errCode = 0;
if (error == AudioDeviceObserver::kRecordingError) {
errCode = VE_RUNTIME_REC_ERROR;
@ -64,7 +61,7 @@ void VoEBaseImpl::OnErrorIsReported(const ErrorCode error) {
}
void VoEBaseImpl::OnWarningIsReported(const WarningCode warning) {
CriticalSectionScoped cs(&callbackCritSect_);
rtc::CritScope cs(&callbackCritSect_);
int warningCode = 0;
if (warning == AudioDeviceObserver::kRecordingWarning) {
warningCode = VE_RUNTIME_REC_WARNING;
@ -176,7 +173,7 @@ void VoEBaseImpl::PullRenderData(int bits_per_sample,
}
int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) {
CriticalSectionScoped cs(&callbackCritSect_);
rtc::CritScope cs(&callbackCritSect_);
if (voiceEngineObserverPtr_) {
shared_->SetLastError(
VE_INVALID_OPERATION, kTraceError,
@ -196,7 +193,7 @@ int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) {
}
int VoEBaseImpl::DeRegisterVoiceEngineObserver() {
CriticalSectionScoped cs(&callbackCritSect_);
rtc::CritScope cs(&callbackCritSect_);
if (!voiceEngineObserverPtr_) {
shared_->SetLastError(
VE_INVALID_OPERATION, kTraceError,
@ -216,7 +213,7 @@ int VoEBaseImpl::DeRegisterVoiceEngineObserver() {
int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
AudioProcessing* audioproc) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
WebRtcSpl_Init();
if (shared_->statistics().Initialized()) {
return 0;
@ -382,12 +379,12 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
}
int VoEBaseImpl::Terminate() {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
return TerminateInternal();
}
int VoEBaseImpl::CreateChannel() {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -398,7 +395,7 @@ int VoEBaseImpl::CreateChannel() {
}
int VoEBaseImpl::CreateChannel(const Config& config) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -434,7 +431,7 @@ int VoEBaseImpl::InitializeChannel(voe::ChannelOwner* channel_owner) {
}
int VoEBaseImpl::DeleteChannel(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -461,7 +458,7 @@ int VoEBaseImpl::DeleteChannel(int channel) {
}
int VoEBaseImpl::StartReceive(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -477,7 +474,7 @@ int VoEBaseImpl::StartReceive(int channel) {
}
int VoEBaseImpl::StopReceive(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -493,7 +490,7 @@ int VoEBaseImpl::StopReceive(int channel) {
}
int VoEBaseImpl::StartPlayout(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -517,7 +514,7 @@ int VoEBaseImpl::StartPlayout(int channel) {
}
int VoEBaseImpl::StopPlayout(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -537,7 +534,7 @@ int VoEBaseImpl::StopPlayout(int channel) {
}
int VoEBaseImpl::StartSend(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -561,7 +558,7 @@ int VoEBaseImpl::StartSend(int channel) {
}
int VoEBaseImpl::StopSend(int channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
@ -795,7 +792,7 @@ void VoEBaseImpl::GetPlayoutData(int sample_rate, size_t number_of_channels,
int VoEBaseImpl::AssociateSendChannel(int channel,
int accociate_send_channel) {
CriticalSectionScoped cs(shared_->crit_sec());
rtc::CritScope cs(shared_->crit_sec());
if (!shared_->statistics().Initialized()) {
shared_->SetLastError(VE_NOT_INITED, kTraceError);

@ -13,6 +13,7 @@
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/voice_engine/shared_data.h"
@ -138,7 +139,7 @@ class VoEBaseImpl : public VoEBase,
// channel.
int InitializeChannel(voe::ChannelOwner* channel_owner);
VoiceEngineObserver* voiceEngineObserverPtr_;
CriticalSectionWrapper& callbackCritSect_;
rtc::CriticalSection callbackCritSect_;
AudioFrame audioFrame_;
voe::SharedData* shared_;

@ -12,7 +12,6 @@
#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -10,7 +10,7 @@
#include "webrtc/voice_engine/voe_dtmf_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
@ -197,7 +197,7 @@ int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback) {
"SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)",
(int)enable, (int)directFeedback);
CriticalSectionScoped sc(_shared->crit_sec());
rtc::CritScope cs(_shared->crit_sec());
_dtmfFeedback = enable;
_dtmfDirectFeedback = directFeedback;
@ -206,7 +206,7 @@ int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback) {
}
int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) {
CriticalSectionScoped sc(_shared->crit_sec());
rtc::CritScope cs(_shared->crit_sec());
enabled = _dtmfFeedback;
directFeedback = _dtmfDirectFeedback;

@ -10,7 +10,6 @@
#include "webrtc/voice_engine/voe_external_media_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -11,7 +11,6 @@
#include "webrtc/voice_engine/voe_file_impl.h"
#include "webrtc/modules/media_file/media_file.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"

@ -12,7 +12,6 @@
#include <assert.h>
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/voice_engine_impl.h"
@ -234,7 +233,7 @@ int VoEHardwareImpl::SetRecordingDevice(int index,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRecordingDevice(index=%d, recordingChannel=%d)", index,
(int)recordingChannel);
CriticalSectionScoped cs(_shared->crit_sec());
rtc::CritScope cs(_shared->crit_sec());
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
@ -345,7 +344,7 @@ int VoEHardwareImpl::SetRecordingDevice(int index,
int VoEHardwareImpl::SetPlayoutDevice(int index) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice(index=%d)", index);
CriticalSectionScoped cs(_shared->crit_sec());
rtc::CritScope cs(_shared->crit_sec());
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);

@ -11,7 +11,6 @@
#include "webrtc/voice_engine/voe_neteq_stats_impl.h"
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -13,7 +13,6 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -10,7 +10,6 @@
#include "webrtc/voice_engine/voe_video_sync_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -10,7 +10,6 @@
#include "webrtc/voice_engine/voe_volume_control_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"

@ -17,7 +17,6 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel_proxy.h"
#include "webrtc/voice_engine/voice_engine_impl.h"
@ -66,7 +65,7 @@ int VoiceEngineImpl::Release() {
rtc::scoped_ptr<voe::ChannelProxy> VoiceEngineImpl::GetChannelProxy(
int channel_id) {
RTC_DCHECK(channel_id >= 0);
CriticalSectionScoped cs(crit_sec());
rtc::CritScope cs(crit_sec());
RTC_DCHECK(statistics().Initialized());
return rtc::scoped_ptr<voe::ChannelProxy>(
new voe::ChannelProxy(channel_manager().GetChannel(channel_id)));
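In the voe_*_impl.cc and voice_engine_impl.cc hunks the lock is reached through an accessor (shared_->crit_sec() or crit_sec()) instead of a member, which works because rtc::CritScope is constructed from a pointer to the critical section. A sketch of that accessor pattern, assuming the SharedData accessor returns rtc::CriticalSection* after this change (the Shared class below is a hypothetical stand-in, not the real voe::SharedData):

#include "webrtc/base/criticalsection.h"

// Hypothetical stand-in for voe::SharedData, for illustration only.
class Shared {
 public:
  rtc::CriticalSection* crit_sec() { return &api_crit_; }

 private:
  rtc::CriticalSection api_crit_;
};

// A call site then locks the same way the hunks above do:
int DoLockedWork(Shared* shared) {
  rtc::CritScope cs(shared->crit_sec());  // CritScope takes a pointer
  return 0;  // ... work guarded by the API critical section ...
}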