/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"

#include <algorithm>
#include <functional>
#include <utility>

#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"

namespace webrtc {
namespace {

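// Bundles a source's status with the audio frame it produced for the current
// mix cycle; the frame's energy is computed up front so frames can be ranked
// by ShouldMixBefore().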
struct SourceFrame {
  SourceFrame(AudioMixerImpl::SourceStatus* source_status,
              AudioFrame* audio_frame,
              bool muted)
      : source_status(source_status), audio_frame(audio_frame), muted(muted) {
    RTC_DCHECK(source_status);
    RTC_DCHECK(audio_frame);
    if (!muted) {
      energy = AudioMixerCalculateEnergy(*audio_frame);
    }
  }

  SourceFrame(AudioMixerImpl::SourceStatus* source_status,
              AudioFrame* audio_frame,
              bool muted,
              uint32_t energy)
      : source_status(source_status),
        audio_frame(audio_frame),
        muted(muted),
        energy(energy) {
    RTC_DCHECK(source_status);
    RTC_DCHECK(audio_frame);
  }

  AudioMixerImpl::SourceStatus* source_status = nullptr;
  AudioFrame* audio_frame = nullptr;
  bool muted = true;
  uint32_t energy = 0;
};

// ShouldMixBefore(a, b) is used to select mixer sources: unmuted frames are
// preferred over muted, voice activity over passive activity, and higher
// energy over lower.
bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
  if (a.muted != b.muted) {
    return b.muted;
  }

  const auto a_activity = a.audio_frame->vad_activity_;
  const auto b_activity = b.audio_frame->vad_activity_;

  if (a_activity != b_activity) {
    return a_activity == AudioFrame::kVadActive;
  }

  return a.energy > b.energy;
}

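// Ramps each frame towards its target gain (1.0 when the source is mixed,
// 0.0 otherwise) and stores the new gain, so sources fade in and out instead
// of switching on and off abruptly.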
void RampAndUpdateGain(
    const std::vector<SourceFrame>& mixed_sources_and_frames) {
  for (const auto& source_frame : mixed_sources_and_frames) {
    float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f;
    Ramp(source_frame.source_status->gain, target_gain,
         source_frame.audio_frame);
    source_frame.source_status->gain = target_gain;
  }
}

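// Saturation handling: when the limiter is in use, MixFromList() halves every
// frame before summation and LimitMixedAudio() later restores the level after
// the limiter has run.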
// Mix the AudioFrames stored in |audio_frame_list| into |mixed_audio|.
int32_t MixFromList(AudioFrame* mixed_audio,
                    const AudioFrameList& audio_frame_list,
                    bool use_limiter) {
  if (audio_frame_list.empty()) {
    return 0;
  }

  if (audio_frame_list.size() == 1) {
    mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
    mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_;
  } else {
    // TODO(wu): Issue 3390.
    // Audio frame timestamp is only supported in one channel case.
    mixed_audio->timestamp_ = 0;
    mixed_audio->elapsed_time_ms_ = -1;
  }

  for (const auto& frame : audio_frame_list) {
    RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_);
    RTC_DCHECK_EQ(
        frame->samples_per_channel_,
        static_cast<size_t>((mixed_audio->sample_rate_hz_ *
                             webrtc::AudioMixerImpl::kFrameDurationInMs) /
                            1000));

    // Mix |frame| into |mixed_audio|, with saturation protection. The
    // attenuation below is applied to |frame| itself prior to mixing.
    if (use_limiter) {
      // Divide by two to avoid saturation in the mixing.
      // This is only meaningful if the limiter will be used.
      *frame >>= 1;
    }
    RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
    *mixed_audio += *frame;
  }
  return 0;
}

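// Returns an iterator to the SourceStatus of |audio_source| in
// |audio_source_list|, or end() if the source is not present.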
AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList(
    AudioMixerImpl::Source const* audio_source,
    AudioMixerImpl::SourceStatusList const* audio_source_list) {
  return std::find_if(audio_source_list->begin(), audio_source_list->end(),
                      [audio_source](const AudioMixerImpl::SourceStatus& p) {
                        return p.audio_source == audio_source;
                      });
}

// TODO(aleloi): remove non-const version when WEBRTC only supports modern STL.
AudioMixerImpl::SourceStatusList::iterator FindSourceInList(
    AudioMixerImpl::Source const* audio_source,
    AudioMixerImpl::SourceStatusList* audio_source_list) {
  return std::find_if(audio_source_list->begin(), audio_source_list->end(),
                      [audio_source](const AudioMixerImpl::SourceStatus& p) {
                        return p.audio_source == audio_source;
                      });
}

}  // namespace

AudioMixerImpl::AudioMixerImpl(std::unique_ptr<AudioProcessing> limiter)
    : audio_source_list_(),
      use_limiter_(true),
      time_stamp_(0),
      limiter_(std::move(limiter)) {
  SetOutputFrequency(kDefaultFrequency);
}

AudioMixerImpl::~AudioMixerImpl() {}

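// The limiter is an AudioProcessing instance configured as a fixed digital
// gain control with the limiter enabled and adaptive AGC turned off.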
rtc::scoped_refptr<AudioMixerImpl> AudioMixerImpl::Create() {
  Config config;
  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
  std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
  if (!limiter.get()) {
    return nullptr;
  }

  if (limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
      limiter->kNoError) {
    return nullptr;
  }

  // We smoothly limit the mixed frame to -7 dBFS. -6 would correspond to the
  // divide-by-2 but -7 is used instead to give a bit of headroom since the
  // AGC is not a hard limiter.
  if (limiter->gain_control()->set_target_level_dbfs(7) != limiter->kNoError) {
    return nullptr;
  }

  if (limiter->gain_control()->set_compression_gain_db(0) !=
      limiter->kNoError) {
    return nullptr;
  }

  if (limiter->gain_control()->enable_limiter(true) != limiter->kNoError) {
    return nullptr;
  }

  if (limiter->gain_control()->Enable(true) != limiter->kNoError) {
    return nullptr;
  }

  return rtc::scoped_refptr<AudioMixerImpl>(
      new rtc::RefCountedObject<AudioMixerImpl>(std::move(limiter)));
}

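// Minimal usage sketch (illustrative only; |my_source| stands for any
// implementation of the AudioMixerImpl::Source interface):
//
//   rtc::scoped_refptr<AudioMixerImpl> mixer = AudioMixerImpl::Create();
//   mixer->AddSource(&my_source);
//   AudioFrame mixed;
//   mixer->Mix(48000, 2, &mixed);  // Call once per kFrameDurationInMs.
//   mixer->RemoveSource(&my_source);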
void AudioMixerImpl::Mix(int sample_rate,
                         size_t number_of_channels,
                         AudioFrame* audio_frame_for_mixing) {
  RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);

  if (OutputFrequency() != sample_rate) {
    SetOutputFrequency(sample_rate);
  }

  AudioFrameList mix_list;
  {
    rtc::CritScope lock(&crit_);
    mix_list = GetAudioFromSources();
  }

  for (const auto& frame : mix_list) {
    RemixFrame(number_of_channels, frame);
  }

  audio_frame_for_mixing->UpdateFrame(
      -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech,
      AudioFrame::kVadPassive, number_of_channels);

  time_stamp_ += static_cast<uint32_t>(sample_size_);

  // We only use the limiter if we're actually mixing multiple streams.
  use_limiter_ = mix_list.size() > 1;

  MixFromList(audio_frame_for_mixing, mix_list, use_limiter_);

  if (audio_frame_for_mixing->samples_per_channel_ == 0) {
    // Nothing was mixed; set the audio samples to silence.
    audio_frame_for_mixing->samples_per_channel_ = sample_size_;
    audio_frame_for_mixing->Mute();
  } else {
    // Only call the limiter if we have something to mix.
    LimitMixedAudio(audio_frame_for_mixing);
  }

  return;
}

void AudioMixerImpl::SetOutputFrequency(int frequency) {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  output_frequency_ = frequency;
  sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;
}

int AudioMixerImpl::OutputFrequency() const {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  return output_frequency_;
}

bool AudioMixerImpl::AddSource(Source* audio_source) {
  RTC_DCHECK(audio_source);
  rtc::CritScope lock(&crit_);
  RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) ==
             audio_source_list_.end())
      << "Source already added to mixer";
  audio_source_list_.emplace_back(audio_source, false, 0);
  return true;
}

bool AudioMixerImpl::RemoveSource(Source* audio_source) {
  RTC_DCHECK(audio_source);
  rtc::CritScope lock(&crit_);
  const auto iter = FindSourceInList(audio_source, &audio_source_list_);
  RTC_DCHECK(iter != audio_source_list_.end())
      << "Source not present in mixer";
  audio_source_list_.erase(iter);
  return true;
}

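// Pulls one frame from every registered source, ranks the frames with
// ShouldMixBefore() and returns at most kMaximumAmountOfMixedAudioSources of
// them; each source's is_mixed flag and ramped gain are updated on the way.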
AudioFrameList AudioMixerImpl::GetAudioFromSources() {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  AudioFrameList result;
  std::vector<SourceFrame> audio_source_mixing_data_list;
  std::vector<SourceFrame> ramp_list;

  // Get audio from the sources and put it in the SourceFrame vector.
  for (auto& source_and_status : audio_source_list_) {
    auto audio_frame_with_info =
        source_and_status.audio_source->GetAudioFrameWithInfo(
            static_cast<int>(OutputFrequency()));

    const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
    AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

    if (audio_frame_info == Source::AudioFrameInfo::kError) {
      LOG_F(LS_WARNING) << "Failed to GetAudioFrameWithInfo() from source";
      continue;
    }
    audio_source_mixing_data_list.emplace_back(
        &source_and_status, audio_source_audio_frame,
        audio_frame_info == Source::AudioFrameInfo::kMuted);
  }

  // Sort frames by the ShouldMixBefore() ordering.
  std::sort(audio_source_mixing_data_list.begin(),
            audio_source_mixing_data_list.end(), ShouldMixBefore);

  int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;

  // Go through the list in order and put unmuted frames in the result list.
  for (const auto& p : audio_source_mixing_data_list) {
    // Filter muted.
    if (p.muted) {
      p.source_status->is_mixed = false;
      continue;
    }

    // Add the frame to the result vector for mixing.
    bool is_mixed = false;
    if (max_audio_frame_counter > 0) {
      --max_audio_frame_counter;
      result.push_back(p.audio_frame);
      ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1);
      is_mixed = true;
    }
    p.source_status->is_mixed = is_mixed;
  }
  RampAndUpdateGain(ramp_list);
  return result;
}

bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  if (!use_limiter_) {
    return true;
  }

  // Smoothly limit the mixed frame.
  const int error = limiter_->ProcessStream(mixed_audio);

  // And now we can safely restore the level. This procedure results in
  // some loss of resolution, deemed acceptable.
  //
  // It's possible to apply the gain in the AGC (with a target level of 0 dBFS
  // and compression gain of 6 dB). However, in the transition frame when this
  // is enabled (moving from one to two audio sources) it has the potential to
  // create discontinuities in the mixed frame.
  //
  // Instead we double the frame (with addition since left-shifting a
  // negative value is undefined).
  *mixed_audio += *mixed_audio;

  if (error != limiter_->kNoError) {
    LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;
    RTC_NOTREACHED();
    return false;
  }
  return true;
}

bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
    AudioMixerImpl::Source* audio_source) const {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  rtc::CritScope lock(&crit_);

  const auto non_anonymous_iter =
      FindSourceInList(audio_source, &audio_source_list_);
  if (non_anonymous_iter != audio_source_list_.end()) {
    return non_anonymous_iter->is_mixed;
  }

  LOG(LS_ERROR) << "Audio source unknown";
  return false;
}
}  // namespace webrtc