2011-07-07 08:21:25 +00:00
|
|
|
/*
|
2012-01-30 20:51:15 +00:00
|
|
|
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
2011-07-07 08:21:25 +00:00
|
|
|
*
|
|
|
|
|
* Use of this source code is governed by a BSD-style license
|
|
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
|
|
|
|
* in the file PATENTS. All contributing project authors may
|
|
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
|
|
|
*/
|
|
|
|
|
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_processing/audio_processing_impl.h"
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
#include <algorithm>
|
2018-10-23 12:03:01 +02:00
|
|
|
#include <cstdint>
|
2019-09-17 17:06:18 +02:00
|
|
|
#include <memory>
|
2017-05-22 06:57:06 -07:00
|
|
|
#include <string>
|
2018-10-23 12:03:01 +02:00
|
|
|
#include <type_traits>
|
|
|
|
|
#include <utility>
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2018-10-23 12:03:01 +02:00
|
|
|
#include "absl/types/optional.h"
|
|
|
|
|
#include "api/array_view.h"
|
2020-03-16 12:06:02 +01:00
|
|
|
#include "api/audio/audio_frame.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "common_audio/audio_converter.h"
|
|
|
|
|
#include "common_audio/include/audio_util.h"
|
2020-05-11 11:03:47 +02:00
|
|
|
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_processing/audio_buffer.h"
|
|
|
|
|
#include "modules/audio_processing/common.h"
|
2018-10-23 12:03:01 +02:00
|
|
|
#include "modules/audio_processing/include/audio_frame_view.h"
|
2018-02-12 21:42:56 +01:00
|
|
|
#include "modules/audio_processing/logging/apm_data_dumper.h"
|
2020-04-27 08:39:33 +02:00
|
|
|
#include "modules/audio_processing/optionally_built_submodule_creators.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/atomic_ops.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "rtc_base/checks.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/constructor_magic.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "rtc_base/logging.h"
|
2019-01-11 09:11:00 -08:00
|
|
|
#include "rtc_base/ref_counted_object.h"
|
|
|
|
|
#include "rtc_base/time_utils.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "rtc_base/trace_event.h"
|
2019-09-20 07:50:35 +02:00
|
|
|
#include "system_wrappers/include/field_trial.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "system_wrappers/include/metrics.h"
|
2011-12-03 00:03:31 +00:00
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
// Evaluates |expr| and, if it yields anything other than kNoError, returns
// that error code from the calling function. The do/while(0) wrapper makes
// the macro behave like a single statement (safe inside unbraced if/else).
#define RETURN_ON_ERR(expr)  \
  do {                       \
    int err = (expr);        \
    if (err != kNoError) {   \
      return err;            \
    }                        \
  } while (0)
|
|
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
namespace webrtc {
|
2016-03-16 18:26:35 -07:00
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
|
|
// Returns true for channel layouts that carry a dedicated keyboard channel
// in addition to the audio channels.
static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMonoAndKeyboard:
    case AudioProcessing::kStereoAndKeyboard:
      return true;
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      return false;
  }

  // All enumerators are handled above; reaching this point is a programming
  // error (e.g. an out-of-range value cast to the enum).
  RTC_NOTREACHED();
  return false;
}
|
2016-03-16 18:26:35 -07:00
|
|
|
|
The audio processing module (APM) relies on two
functionalities for doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
it is no longer necessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
bool SampleRateSupportsMultiBand(int sample_rate_hz) {
|
2016-03-16 18:26:35 -07:00
|
|
|
return sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
|
|
|
|
|
sample_rate_hz == AudioProcessing::kSampleRate48kHz;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-10 13:04:15 +01:00
|
|
|
// Checks whether the high-pass filter should be done in the full-band.
|
|
|
|
|
bool EnforceSplitBandHpf() {
|
|
|
|
|
return field_trial::IsEnabled("WebRTC-FullBandHpfKillSwitch");
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-02 14:59:40 +01:00
|
|
|
// Checks whether AEC3 should be allowed to decide what the default
|
|
|
|
|
// configuration should be based on the render and capture channel configuration
|
|
|
|
|
// at hand.
|
|
|
|
|
bool UseSetupSpecificDefaultAec3Congfig() {
|
|
|
|
|
return !field_trial::IsEnabled(
|
|
|
|
|
"WebRTC-Aec3SetupSpecificDefaultConfigDefaultsKillSwitch");
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-23 15:49:51 +02:00
|
|
|
// Identify the native processing rate that best handles a sample rate.
|
2019-09-15 00:27:58 +02:00
|
|
|
int SuitableProcessRate(int minimum_rate,
|
|
|
|
|
int max_splitting_rate,
|
|
|
|
|
bool band_splitting_required) {
|
2019-08-23 15:49:51 +02:00
|
|
|
const int uppermost_native_rate =
|
2019-09-15 00:27:58 +02:00
|
|
|
band_splitting_required ? max_splitting_rate : 48000;
|
2019-08-23 15:49:51 +02:00
|
|
|
for (auto rate : {16000, 32000, 48000}) {
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
if (rate >= uppermost_native_rate) {
|
|
|
|
|
return uppermost_native_rate;
|
|
|
|
|
}
|
|
|
|
|
if (rate >= minimum_rate) {
|
2016-03-16 18:26:35 -07:00
|
|
|
return rate;
|
|
|
|
|
}
|
|
|
|
|
}
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
RTC_NOTREACHED();
|
|
|
|
|
return uppermost_native_rate;
|
2016-03-16 18:26:35 -07:00
|
|
|
}
|
|
|
|
|
|
2019-03-27 13:28:08 +01:00
|
|
|
// Maps the AGC1 mode enumerator from the APM config to the corresponding
// mode enumerator of the GainControl submodule interface. The mapping is
// exhaustive; an out-of-range value trips the CHECK below.
GainControl::Mode Agc1ConfigModeToInterfaceMode(
    AudioProcessing::Config::GainController1::Mode mode) {
  using Agc1Config = AudioProcessing::Config::GainController1;
  switch (mode) {
    case Agc1Config::kAdaptiveAnalog:
      return GainControl::kAdaptiveAnalog;
    case Agc1Config::kAdaptiveDigital:
      return GainControl::kAdaptiveDigital;
    case Agc1Config::kFixedDigital:
      return GainControl::kFixedDigital;
  }
  RTC_CHECK_NOTREACHED();
}
|
|
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
bool MinimizeProcessingForUnusedOutput() {
|
|
|
|
|
return !field_trial::IsEnabled("WebRTC-MutedStateKillSwitch");
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-15 07:19:21 -07:00
|
|
|
// Maximum lengths that frames of samples being passed from the render side to
// the capture side can have (does not apply to AEC3).
// constexpr: guaranteed compile-time constants; `static` is redundant inside
// the anonymous namespace.
constexpr size_t kMaxAllowedValuesOfSamplesPerBand = 160;
constexpr size_t kMaxAllowedValuesOfSamplesPerFrame = 480;

// Maximum number of frames to buffer in the render queue.
// TODO(peah): Decrease this once we properly handle hugely unbalanced
// reverse and forward call numbers.
constexpr size_t kMaxNumFramesToBuffer = 100;
|
2020-10-14 12:47:50 +02:00
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
} // namespace
|
2014-01-07 17:45:09 +00:00
|
|
|
|
|
|
|
|
// Throughout webrtc, it's assumed that success is represented by zero, so the
// RETURN_ON_ERR macro and all call sites depend on this holding at compile
// time.
static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
|
2014-01-07 17:45:09 +00:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
// Caches whether the optional capture/render processing components were
// supplied at construction time; presumably these flags stay fixed for the
// lifetime of the APM instance (TODO confirm against the header).
AudioProcessingImpl::SubmoduleStates::SubmoduleStates(
    bool capture_post_processor_enabled,
    bool render_pre_processor_enabled,
    bool capture_analyzer_enabled)
    : capture_post_processor_enabled_(capture_post_processor_enabled),
      render_pre_processor_enabled_(render_pre_processor_enabled),
      capture_analyzer_enabled_(capture_analyzer_enabled) {}
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::Update(
|
2018-09-28 14:15:09 +02:00
|
|
|
bool high_pass_filter_enabled,
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
bool mobile_echo_controller_enabled,
|
2016-10-28 05:39:16 -07:00
|
|
|
bool residual_echo_detector_enabled,
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
bool noise_suppressor_enabled,
|
|
|
|
|
bool adaptive_gain_controller_enabled,
|
2017-05-22 06:57:06 -07:00
|
|
|
bool gain_controller2_enabled,
|
2021-03-15 16:31:04 +00:00
|
|
|
bool gain_adjustment_enabled,
|
2017-10-16 13:49:04 +02:00
|
|
|
bool echo_controller_enabled,
|
2019-10-07 14:03:56 +02:00
|
|
|
bool voice_detector_enabled,
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
bool transient_suppressor_enabled) {
|
|
|
|
|
bool changed = false;
|
2018-09-28 14:15:09 +02:00
|
|
|
changed |= (high_pass_filter_enabled != high_pass_filter_enabled_);
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
changed |=
|
|
|
|
|
(mobile_echo_controller_enabled != mobile_echo_controller_enabled_);
|
2016-10-28 05:39:16 -07:00
|
|
|
changed |=
|
|
|
|
|
(residual_echo_detector_enabled != residual_echo_detector_enabled_);
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
changed |= (noise_suppressor_enabled != noise_suppressor_enabled_);
|
|
|
|
|
changed |=
|
|
|
|
|
(adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_);
|
2019-04-26 11:33:37 +02:00
|
|
|
changed |= (gain_controller2_enabled != gain_controller2_enabled_);
|
2021-03-15 16:31:04 +00:00
|
|
|
changed |= (gain_adjustment_enabled != gain_adjustment_enabled_);
|
2017-10-16 13:49:04 +02:00
|
|
|
changed |= (echo_controller_enabled != echo_controller_enabled_);
|
2019-10-07 14:03:56 +02:00
|
|
|
changed |= (voice_detector_enabled != voice_detector_enabled_);
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
changed |= (transient_suppressor_enabled != transient_suppressor_enabled_);
|
|
|
|
|
if (changed) {
|
2018-09-28 14:15:09 +02:00
|
|
|
high_pass_filter_enabled_ = high_pass_filter_enabled;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
mobile_echo_controller_enabled_ = mobile_echo_controller_enabled;
|
2016-10-28 05:39:16 -07:00
|
|
|
residual_echo_detector_enabled_ = residual_echo_detector_enabled;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
noise_suppressor_enabled_ = noise_suppressor_enabled;
|
|
|
|
|
adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled;
|
2017-05-22 06:57:06 -07:00
|
|
|
gain_controller2_enabled_ = gain_controller2_enabled;
|
2021-03-15 16:31:04 +00:00
|
|
|
gain_adjustment_enabled_ = gain_adjustment_enabled;
|
2017-10-16 13:49:04 +02:00
|
|
|
echo_controller_enabled_ = echo_controller_enabled;
|
2019-10-07 14:03:56 +02:00
|
|
|
voice_detector_enabled_ = voice_detector_enabled;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
transient_suppressor_enabled_ = transient_suppressor_enabled;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
changed |= first_update_;
|
|
|
|
|
first_update_ = false;
|
|
|
|
|
return changed;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandSubModulesActive()
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
const {
|
2019-10-09 13:34:36 +02:00
|
|
|
return CaptureMultiBandProcessingPresent() || voice_detector_enabled_;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandProcessingPresent()
|
|
|
|
|
const {
|
2019-10-09 13:34:36 +02:00
|
|
|
// If echo controller is present, assume it performs active processing.
|
|
|
|
|
return CaptureMultiBandProcessingActive(/*ec_processing_active=*/true);
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::CaptureMultiBandProcessingActive(
|
2019-10-09 13:34:36 +02:00
|
|
|
bool ec_processing_active) const {
|
2019-12-09 10:18:44 +01:00
|
|
|
return high_pass_filter_enabled_ || mobile_echo_controller_enabled_ ||
|
|
|
|
|
noise_suppressor_enabled_ || adaptive_gain_controller_enabled_ ||
|
2019-10-09 13:34:36 +02:00
|
|
|
(echo_controller_enabled_ && ec_processing_active);
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::CaptureFullBandProcessingActive()
|
2017-05-23 05:33:56 -07:00
|
|
|
const {
|
2018-04-16 16:31:22 +02:00
|
|
|
return gain_controller2_enabled_ || capture_post_processor_enabled_ ||
|
2021-03-15 16:31:04 +00:00
|
|
|
gain_adjustment_enabled_;
|
2017-05-23 05:33:56 -07:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::CaptureAnalyzerActive() const {
  // True iff a capture analyzer submodule was injected (flag is set from the
  // constructor arguments of SubmoduleStates).
  return capture_analyzer_enabled_;
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::RenderMultiBandSubModulesActive()
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
const {
|
2019-12-09 10:18:44 +01:00
|
|
|
return RenderMultiBandProcessingActive() || mobile_echo_controller_enabled_ ||
|
|
|
|
|
adaptive_gain_controller_enabled_ || echo_controller_enabled_;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::RenderFullBandProcessingActive()
    const {
  // Only an injected render pre-processor triggers full-band processing of
  // the render signal.
  return render_pre_processor_enabled_;
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::RenderMultiBandProcessingActive()
    const {
  // No submodule currently modifies the multi-band render signal, so this
  // unconditionally returns false (RenderMultiBandSubModulesActive() still
  // accounts for submodules that need the split bands).
  return false;
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const {
|
2019-12-09 10:18:44 +01:00
|
|
|
return high_pass_filter_enabled_ || mobile_echo_controller_enabled_ ||
|
|
|
|
|
noise_suppressor_enabled_;
|
2018-09-28 14:15:09 +02:00
|
|
|
}
|
|
|
|
|
|
2016-09-12 16:47:25 -07:00
|
|
|
// Convenience constructor: delegates to the fully-parameterized constructor
// with no injected submodules.
AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config)
    : AudioProcessingImpl(config,
                          /*capture_post_processor=*/nullptr,
                          /*render_pre_processor=*/nullptr,
                          /*echo_control_factory=*/nullptr,
                          /*echo_detector=*/nullptr,
                          /*capture_analyzer=*/nullptr) {}
|
2015-01-15 18:07:21 +00:00
|
|
|
|
2018-02-12 21:42:56 +01:00
|
|
|
// Process-wide instance counter; atomically incremented in the constructor to
// give each instance's ApmDataDumper a unique id.
int AudioProcessingImpl::instance_count_ = 0;
|
|
|
|
|
|
2017-09-25 12:04:02 +02:00
|
|
|
// Fully-parameterized constructor. Takes ownership of the injected submodules
// (capture post processor, render pre processor, echo control factory, echo
// detector, capture analyzer); any of them may be null.
AudioProcessingImpl::AudioProcessingImpl(
    const webrtc::Config& config,
    std::unique_ptr<CustomProcessing> capture_post_processor,
    std::unique_ptr<CustomProcessing> render_pre_processor,
    std::unique_ptr<EchoControlFactory> echo_control_factory,
    rtc::scoped_refptr<EchoDetector> echo_detector,
    std::unique_ptr<CustomAudioAnalyzer> capture_analyzer)
    // Each instance gets a unique data-dumper id from instance_count_.
    : data_dumper_(
          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
      // "Congfig" [sic] matches the helper's declared name elsewhere.
      use_setup_specific_default_aec3_config_(
          UseSetupSpecificDefaultAec3Congfig()),
      capture_runtime_settings_(RuntimeSettingQueueSize()),
      render_runtime_settings_(RuntimeSettingQueueSize()),
      capture_runtime_settings_enqueuer_(&capture_runtime_settings_),
      render_runtime_settings_enqueuer_(&render_runtime_settings_),
      echo_control_factory_(std::move(echo_control_factory)),
      // submodule_states_ only records which submodules were injected; it must
      // read the pointers before they are moved into submodules_ below.
      submodule_states_(!!capture_post_processor,
                        !!render_pre_processor,
                        !!capture_analyzer),
      submodules_(std::move(capture_post_processor),
                  std::move(render_pre_processor),
                  std::move(echo_detector),
                  std::move(capture_analyzer)),
      // Multi-channel render/capture support defaults on; the field trials
      // act as kill switches.
      constants_(!field_trial::IsEnabled(
                     "WebRTC-ApmExperimentalMultiChannelRenderKillSwitch"),
                 !field_trial::IsEnabled(
                     "WebRTC-ApmExperimentalMultiChannelCaptureKillSwitch"),
                 EnforceSplitBandHpf(),
                 MinimizeProcessingForUnusedOutput()),
      capture_(),
      capture_nonlocked_() {
  RTC_LOG(LS_INFO) << "Injected APM submodules:"
                      "\nEcho control factory: "
                   << !!echo_control_factory_
                   << "\nEcho detector: " << !!submodules_.echo_detector
                   << "\nCapture analyzer: " << !!submodules_.capture_analyzer
                   << "\nCapture post processor: "
                   << !!submodules_.capture_post_processor
                   << "\nRender pre processor: "
                   << !!submodules_.render_pre_processor;

  // Mark Echo Controller enabled if a factory is injected.
  capture_nonlocked_.echo_controller_enabled =
      static_cast<bool>(echo_control_factory_);

  // If no echo detector is injected, use the ResidualEchoDetector.
  if (!submodules_.echo_detector) {
    submodules_.echo_detector = rtc::make_ref_counted<ResidualEchoDetector>();
  }

#if !(defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS))
  // On desktop platforms, map the legacy webrtc::Config knobs onto the new
  // AudioProcessing::Config fields.
  // TODO(webrtc:5298): Remove once the use of ExperimentalNs has been
  // deprecated.
  config_.transient_suppression.enabled = config.Get<ExperimentalNs>().enabled;

  // TODO(webrtc:5298): Remove once the use of ExperimentalAgc has been
  // deprecated.
  config_.gain_controller1.analog_gain_controller.enabled =
      config.Get<ExperimentalAgc>().enabled;
  config_.gain_controller1.analog_gain_controller.startup_min_volume =
      config.Get<ExperimentalAgc>().startup_min_volume;
  config_.gain_controller1.analog_gain_controller.clipped_level_min =
      config.Get<ExperimentalAgc>().clipped_level_min;
  config_.gain_controller1.analog_gain_controller.enable_digital_adaptive =
      !config.Get<ExperimentalAgc>().digital_adaptive_disabled;
#endif

  Initialize();
}
|
|
|
|
|
|
2019-11-18 08:52:22 +01:00
|
|
|
// Out-of-line defaulted destructor; cleanup is handled by member destructors.
AudioProcessingImpl::~AudioProcessingImpl() = default;
|
2011-07-07 08:21:25 +00:00
|
|
|
|
|
|
|
|
// Reinitializes the APM with the current format configuration.
// Always returns kNoError.
int AudioProcessingImpl::Initialize() {
  // Run in a single-threaded manner during initialization.
  // Locks are taken in render-then-capture order, matching
  // Initialize(const ProcessingConfig&).
  MutexLock lock_render(&mutex_render_);
  MutexLock lock_capture(&mutex_capture_);
  InitializeLocked();
  return kNoError;
}
|
|
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz,
|
|
|
|
|
int capture_output_sample_rate_hz,
|
|
|
|
|
int render_input_sample_rate_hz,
|
|
|
|
|
ChannelLayout capture_input_layout,
|
|
|
|
|
ChannelLayout capture_output_layout,
|
|
|
|
|
ChannelLayout render_input_layout) {
|
2015-07-23 11:41:39 -07:00
|
|
|
const ProcessingConfig processing_config = {
|
2016-09-16 15:02:15 -07:00
|
|
|
{{capture_input_sample_rate_hz, ChannelsFromLayout(capture_input_layout),
|
|
|
|
|
LayoutHasKeyboard(capture_input_layout)},
|
|
|
|
|
{capture_output_sample_rate_hz,
|
|
|
|
|
ChannelsFromLayout(capture_output_layout),
|
|
|
|
|
LayoutHasKeyboard(capture_output_layout)},
|
|
|
|
|
{render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout),
|
|
|
|
|
LayoutHasKeyboard(render_input_layout)},
|
|
|
|
|
{render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout),
|
|
|
|
|
LayoutHasKeyboard(render_input_layout)}}};
|
2015-07-23 11:41:39 -07:00
|
|
|
|
|
|
|
|
return Initialize(processing_config);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Reinitializes the APM with the given stream format configuration.
int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
  // Run in a single-threaded manner during initialization.
  // Locks are taken in render-then-capture order, matching Initialize().
  MutexLock lock_render(&mutex_render_);
  MutexLock lock_capture(&mutex_capture_);
  return InitializeLocked(processing_config);
}
|
|
|
|
|
|
2015-11-28 12:35:15 -08:00
|
|
|
int AudioProcessingImpl::MaybeInitializeRender(
|
2015-11-27 02:47:28 -08:00
|
|
|
const ProcessingConfig& processing_config) {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Called from both threads. Thread check is therefore not possible.
|
2019-05-23 14:28:00 +02:00
|
|
|
if (processing_config == formats_.api_format) {
|
2015-11-17 02:16:45 -08:00
|
|
|
return kNoError;
|
|
|
|
|
}
|
2015-11-28 12:35:15 -08:00
|
|
|
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock_capture(&mutex_capture_);
|
2015-11-17 02:16:45 -08:00
|
|
|
return InitializeLocked(processing_config);
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-01 23:57:20 +02:00
|
|
|
// (Re)creates the audio buffers and (re)initializes every submodule for the
// current format configuration in formats_. Both the render and the capture
// mutex must be held by the caller.
void AudioProcessingImpl::InitializeLocked() {
  // Refresh the cached submodule-enabled flags before sizing the buffers.
  UpdateActiveSubmoduleStates();

  // If no render output is configured, keep the render buffer at the internal
  // render processing rate; otherwise use the configured output rate.
  const int render_audiobuffer_sample_rate_hz =
      formats_.api_format.reverse_output_stream().num_frames() == 0
          ? formats_.render_processing_format.sample_rate_hz()
          : formats_.api_format.reverse_output_stream().sample_rate_hz();
  if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
    render_.render_audio.reset(new AudioBuffer(
        formats_.api_format.reverse_input_stream().sample_rate_hz(),
        formats_.api_format.reverse_input_stream().num_channels(),
        formats_.render_processing_format.sample_rate_hz(),
        formats_.render_processing_format.num_channels(),
        render_audiobuffer_sample_rate_hz,
        formats_.render_processing_format.num_channels()));
    // A converter is only needed when render input and output formats differ.
    if (formats_.api_format.reverse_input_stream() !=
        formats_.api_format.reverse_output_stream()) {
      render_.render_converter = AudioConverter::Create(
          formats_.api_format.reverse_input_stream().num_channels(),
          formats_.api_format.reverse_input_stream().num_frames(),
          formats_.api_format.reverse_output_stream().num_channels(),
          formats_.api_format.reverse_output_stream().num_frames());
    } else {
      render_.render_converter.reset(nullptr);
    }
  } else {
    // No render stream configured: release both buffer and converter.
    render_.render_audio.reset(nullptr);
    render_.render_converter.reset(nullptr);
  }

  capture_.capture_audio.reset(new AudioBuffer(
      formats_.api_format.input_stream().sample_rate_hz(),
      formats_.api_format.input_stream().num_channels(),
      capture_nonlocked_.capture_processing_format.sample_rate_hz(),
      formats_.api_format.output_stream().num_channels(),
      formats_.api_format.output_stream().sample_rate_hz(),
      formats_.api_format.output_stream().num_channels()));

  // When processing runs below a 48 kHz output rate, keep a separate
  // full-band capture buffer at the output rate.
  if (capture_nonlocked_.capture_processing_format.sample_rate_hz() <
          formats_.api_format.output_stream().sample_rate_hz() &&
      formats_.api_format.output_stream().sample_rate_hz() == 48000) {
    capture_.capture_fullband_audio.reset(
        new AudioBuffer(formats_.api_format.input_stream().sample_rate_hz(),
                        formats_.api_format.input_stream().num_channels(),
                        formats_.api_format.output_stream().sample_rate_hz(),
                        formats_.api_format.output_stream().num_channels(),
                        formats_.api_format.output_stream().sample_rate_hz(),
                        formats_.api_format.output_stream().num_channels()));
  } else {
    capture_.capture_fullband_audio.reset();
  }

  AllocateRenderQueue();

  // (Re)initialize each submodule for the new configuration.
  // NOTE(review): call order appears intentional (e.g. the echo controller is
  // initialized before the noise suppressor) — confirm before reordering.
  InitializeGainController1();
  InitializeTransientSuppressor();
  InitializeHighPassFilter(true);
  InitializeVoiceDetector();
  InitializeResidualEchoDetector();
  InitializeEchoController();
  InitializeGainController2();
  InitializeNoiseSuppressor();
  InitializeAnalyzer();
  InitializePostProcessor();
  InitializePreProcessor();
  InitializeCaptureLevelsAdjuster();

  // Record the new format configuration in an active AEC dump, if any.
  if (aec_dump_) {
    aec_dump_->WriteInitMessage(formats_.api_format, rtc::TimeUTCMillis());
  }
}
|
|
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
|
2017-06-27 16:00:38 +02:00
|
|
|
UpdateActiveSubmoduleStates();
|
|
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
for (const auto& stream : config.streams) {
|
|
|
|
|
if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
|
|
|
|
|
return kBadSampleRateError;
|
|
|
|
|
}
|
Revert of Allow more than 2 input channels in AudioProcessing. (patchset #13 id:240001 of https://codereview.webrtc.org/1226093007/)
Reason for revert:
Breaks Chromium FYI content_browsertest on all platforms. The testcase that fails is WebRtcAecDumpBrowserTest.CallWithAecDump.
https://build.chromium.org/p/chromium.webrtc.fyi/builders/Linux/builds/19388
Sample output:
[ RUN ] WebRtcAecDumpBrowserTest.CallWithAecDump
Xlib: extension "RANDR" missing on display ":9".
[4:14:0722/211548:1282124453:WARNING:webrtcvoiceengine.cc(472)] Unexpected codec: ISAC/48000/1 (105)
[4:14:0722/211548:1282124593:WARNING:webrtcvoiceengine.cc(472)] Unexpected codec: PCMU/8000/2 (110)
[4:14:0722/211548:1282124700:WARNING:webrtcvoiceengine.cc(472)] Unexpected codec: PCMA/8000/2 (118)
[4:14:0722/211548:1282124815:WARNING:webrtcvoiceengine.cc(472)] Unexpected codec: G722/8000/2 (119)
[19745:19745:0722/211548:1282133667:INFO:CONSOLE(64)] "Looking at video in element remote-view-1", source: http://127.0.0.1:48819/media/webrtc_test_utilities.js (64)
[19745:19745:0722/211548:1282136892:INFO:CONSOLE(64)] "Looking at video in element remote-view-2", source: http://127.0.0.1:48819/media/webrtc_test_utilities.js (64)
../../content/test/webrtc_content_browsertest_base.cc:62: Failure
Value of: ExecuteScriptAndExtractString( shell()->web_contents(), javascript, &result)
Actual: false
Expected: true
Failed to execute javascript call({video: true, audio: true});.
From javascript: (nothing)
When executing 'call({video: true, audio: true});'
../../content/test/webrtc_content_browsertest_base.cc:75: Failure
Failed
../../content/browser/media/webrtc_aecdump_browsertest.cc:26: Failure
Expected: (base::kNullProcessId) != (*id), actual: 0 vs 0
../../content/browser/media/webrtc_aecdump_browsertest.cc:95: Failure
Value of: GetRenderProcessHostId(&render_process_id)
Actual: false
Expected: true
../../content/browser/media/webrtc_aecdump_browsertest.cc:99: Failure
Value of: base::PathExists(dump_file)
Actual: false
Expected: true
../../content/browser/media/webrtc_aecdump_browsertest.cc:101: Failure
Value of: base::GetFileSize(dump_file, &file_size)
Actual: false
Expected: true
../../content/browser/media/webrtc_aecdump_browsertest.cc:102: Failure
Expected: (file_size) > (0), actual: 0 vs 0
[ FAILED ] WebRtcAecDumpBrowserTest.CallWithAecDump, where TypeParam = and GetParam() = (361 ms)
Original issue's description:
> Allow more than 2 input channels in AudioProcessing.
>
> The number of output channels is constrained to be equal to either 1 or the
> number of input channels.
>
> R=aluebs@webrtc.org, andrew@webrtc.org, pbos@webrtc.org
>
> Committed: https://chromium.googlesource.com/external/webrtc/+/c204754b7a0cc801c70e8ce6c689f57f6ce00b3b
TBR=andrew@webrtc.org,aluebs@webrtc.org,ajm@chromium.org,pbos@chromium.org,pbos@webrtc.org,mgraczyk@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
Review URL: https://codereview.webrtc.org/1253573005
Cr-Commit-Position: refs/heads/master@{#9621}
2015-07-23 04:30:06 -07:00
|
|
|
}
|
2015-07-23 11:41:39 -07:00
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
const size_t num_in_channels = config.input_stream().num_channels();
|
|
|
|
|
const size_t num_out_channels = config.output_stream().num_channels();
|
2015-07-23 11:41:39 -07:00
|
|
|
|
|
|
|
|
// Need at least one input channel.
|
|
|
|
|
// Need either one output channel or as many outputs as there are inputs.
|
|
|
|
|
if (num_in_channels == 0 ||
|
|
|
|
|
!(num_out_channels == 1 || num_out_channels == num_in_channels)) {
|
2014-03-10 22:26:12 +00:00
|
|
|
return kBadNumberChannelsError;
|
|
|
|
|
}
|
2015-07-23 11:41:39 -07:00
|
|
|
|
2015-11-28 12:35:15 -08:00
|
|
|
formats_.api_format = config;
|
2014-04-22 21:00:04 +00:00
|
|
|
|
2019-09-15 00:27:58 +02:00
|
|
|
// Choose maximum rate to use for the split filtering.
|
|
|
|
|
RTC_DCHECK(config_.pipeline.maximum_internal_processing_rate == 48000 ||
|
|
|
|
|
config_.pipeline.maximum_internal_processing_rate == 32000);
|
|
|
|
|
int max_splitting_rate = 48000;
|
|
|
|
|
if (config_.pipeline.maximum_internal_processing_rate == 32000) {
|
|
|
|
|
max_splitting_rate = config_.pipeline.maximum_internal_processing_rate;
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-23 15:49:51 +02:00
|
|
|
int capture_processing_rate = SuitableProcessRate(
|
2016-04-09 16:06:52 -07:00
|
|
|
std::min(formats_.api_format.input_stream().sample_rate_hz(),
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
formats_.api_format.output_stream().sample_rate_hz()),
|
2019-09-15 00:27:58 +02:00
|
|
|
max_splitting_rate,
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
submodule_states_.CaptureMultiBandSubModulesActive() ||
|
|
|
|
|
submodule_states_.RenderMultiBandSubModulesActive());
|
2019-08-23 15:49:51 +02:00
|
|
|
RTC_DCHECK_NE(8000, capture_processing_rate);
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
capture_nonlocked_.capture_processing_format =
|
|
|
|
|
StreamConfig(capture_processing_rate);
|
2014-04-22 21:00:04 +00:00
|
|
|
|
2017-04-07 03:57:48 -07:00
|
|
|
int render_processing_rate;
|
2017-10-18 12:32:42 +02:00
|
|
|
if (!capture_nonlocked_.echo_controller_enabled) {
|
2019-08-23 15:49:51 +02:00
|
|
|
render_processing_rate = SuitableProcessRate(
|
2017-04-07 03:57:48 -07:00
|
|
|
std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(),
|
|
|
|
|
formats_.api_format.reverse_output_stream().sample_rate_hz()),
|
2019-09-15 00:27:58 +02:00
|
|
|
max_splitting_rate,
|
2017-04-07 03:57:48 -07:00
|
|
|
submodule_states_.CaptureMultiBandSubModulesActive() ||
|
|
|
|
|
submodule_states_.RenderMultiBandSubModulesActive());
|
|
|
|
|
} else {
|
|
|
|
|
render_processing_rate = capture_processing_rate;
|
|
|
|
|
}
|
|
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
// If the forward sample rate is 8 kHz, the render stream is also processed
|
2016-04-20 15:27:58 -07:00
|
|
|
// at this rate.
|
2016-09-16 15:02:15 -07:00
|
|
|
if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
|
|
|
|
|
kSampleRate8kHz) {
|
|
|
|
|
render_processing_rate = kSampleRate8kHz;
|
2014-04-22 21:00:04 +00:00
|
|
|
} else {
|
2016-09-16 15:02:15 -07:00
|
|
|
render_processing_rate =
|
|
|
|
|
std::max(render_processing_rate, static_cast<int>(kSampleRate16kHz));
|
2014-03-10 22:26:12 +00:00
|
|
|
}
|
|
|
|
|
|
2019-08-23 15:49:51 +02:00
|
|
|
RTC_DCHECK_NE(8000, render_processing_rate);
|
|
|
|
|
|
2017-05-19 01:28:05 -07:00
|
|
|
if (submodule_states_.RenderMultiBandSubModulesActive()) {
|
2019-09-20 07:50:35 +02:00
|
|
|
// By default, downmix the render stream to mono for analysis. This has been
|
|
|
|
|
// demonstrated to work well for AEC in most practical scenarios.
|
2019-11-27 09:34:22 +01:00
|
|
|
const bool multi_channel_render = config_.pipeline.multi_channel_render &&
|
|
|
|
|
constants_.multi_channel_render_support;
|
2019-09-20 07:50:35 +02:00
|
|
|
int render_processing_num_channels =
|
2019-11-27 09:34:22 +01:00
|
|
|
multi_channel_render
|
2019-09-20 07:50:35 +02:00
|
|
|
? formats_.api_format.reverse_input_stream().num_channels()
|
|
|
|
|
: 1;
|
|
|
|
|
formats_.render_processing_format =
|
|
|
|
|
StreamConfig(render_processing_rate, render_processing_num_channels);
|
2017-05-19 01:28:05 -07:00
|
|
|
} else {
|
|
|
|
|
formats_.render_processing_format = StreamConfig(
|
|
|
|
|
formats_.api_format.reverse_input_stream().sample_rate_hz(),
|
|
|
|
|
formats_.api_format.reverse_input_stream().num_channels());
|
|
|
|
|
}
|
2014-03-10 22:26:12 +00:00
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
|
|
|
|
|
kSampleRate32kHz ||
|
|
|
|
|
capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
|
|
|
|
|
kSampleRate48kHz) {
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_nonlocked_.split_rate = kSampleRate16kHz;
|
2014-03-10 22:26:12 +00:00
|
|
|
} else {
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_nonlocked_.split_rate =
|
2016-09-16 15:02:15 -07:00
|
|
|
capture_nonlocked_.capture_processing_format.sample_rate_hz();
|
2014-03-10 22:26:12 +00:00
|
|
|
}
|
|
|
|
|
|
2020-09-01 23:57:20 +02:00
|
|
|
InitializeLocked();
|
|
|
|
|
return kNoError;
|
2014-03-10 22:26:12 +00:00
|
|
|
}
|
|
|
|
|
|
2016-09-12 16:47:25 -07:00
|
|
|
void AudioProcessingImpl::ApplyConfig(const AudioProcessing::Config& config) {
  RTC_LOG(LS_INFO) << "AudioProcessing::ApplyConfig: " << config.ToString();

  // Grab both locks so the configuration is applied in a single-threaded
  // manner with respect to both the render and the capture paths.
  MutexLock lock_render(&mutex_render_);
  MutexLock lock_capture(&mutex_capture_);

  // Detect which submodule configurations differ from the currently active
  // ones *before* `config_` is overwritten below, so that only the affected
  // submodules are reinitialized afterwards.
  const bool pipeline_config_changed =
      config_.pipeline.multi_channel_render !=
          config.pipeline.multi_channel_render ||
      config_.pipeline.multi_channel_capture !=
          config.pipeline.multi_channel_capture ||
      config_.pipeline.maximum_internal_processing_rate !=
          config.pipeline.maximum_internal_processing_rate;

  const bool aec_config_changed =
      config_.echo_canceller.enabled != config.echo_canceller.enabled ||
      config_.echo_canceller.mobile_mode != config.echo_canceller.mobile_mode;

  const bool agc1_config_changed =
      config_.gain_controller1 != config.gain_controller1;

  const bool agc2_config_changed =
      config_.gain_controller2 != config.gain_controller2;

  const bool voice_detection_config_changed =
      config_.voice_detection.enabled != config.voice_detection.enabled;

  const bool ns_config_changed =
      config_.noise_suppression.enabled != config.noise_suppression.enabled ||
      config_.noise_suppression.level != config.noise_suppression.level;

  const bool ts_config_changed = config_.transient_suppression.enabled !=
                                 config.transient_suppression.enabled;

  const bool pre_amplifier_config_changed =
      config_.pre_amplifier.enabled != config.pre_amplifier.enabled ||
      config_.pre_amplifier.fixed_gain_factor !=
          config.pre_amplifier.fixed_gain_factor;

  const bool gain_adjustment_config_changed =
      config_.capture_level_adjustment != config.capture_level_adjustment;

  // Adopt the new configuration; the *_changed flags above refer to the
  // difference between the old and this new state.
  config_ = config;

  if (aec_config_changed) {
    InitializeEchoController();
  }

  if (ns_config_changed) {
    InitializeNoiseSuppressor();
  }

  if (ts_config_changed) {
    InitializeTransientSuppressor();
  }

  // The high-pass filter is (re)initialized unconditionally on every
  // ApplyConfig call; `false` indicates this is not a full-pipeline init.
  InitializeHighPassFilter(false);

  if (agc1_config_changed) {
    InitializeGainController1();
  }

  // Fall back to the default AGC2 config if the requested one is invalid.
  const bool config_ok = GainController2::Validate(config_.gain_controller2);
  if (!config_ok) {
    RTC_LOG(LS_ERROR)
        << "Invalid Gain Controller 2 config; using the default config.";
    config_.gain_controller2 = AudioProcessing::Config::GainController2();
  }

  if (agc2_config_changed) {
    InitializeGainController2();
  }

  if (pre_amplifier_config_changed || gain_adjustment_config_changed) {
    InitializeCaptureLevelsAdjuster();
  }

  // Lazily create the output level estimator the first time it is enabled.
  if (config_.level_estimation.enabled && !submodules_.output_level_estimator) {
    submodules_.output_level_estimator = std::make_unique<LevelEstimator>();
  }

  if (voice_detection_config_changed) {
    InitializeVoiceDetector();
  }

  // Reinitialization must happen after all submodule configuration to avoid
  // additional reinitializations on the next capture / render processing call.
  if (pipeline_config_changed) {
    InitializeLocked(formats_.api_format);
  }
}
|
|
|
|
|
|
2020-04-27 08:39:33 +02:00
|
|
|
void AudioProcessingImpl::OverrideSubmoduleCreationForTesting(
|
|
|
|
|
const ApmSubmoduleCreationOverrides& overrides) {
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock(&mutex_capture_);
|
2020-04-27 08:39:33 +02:00
|
|
|
submodule_creation_overrides_ = overrides;
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-22 21:00:04 +00:00
|
|
|
int AudioProcessingImpl::proc_sample_rate_hz() const {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
2016-09-16 15:02:15 -07:00
|
|
|
return capture_nonlocked_.capture_processing_format.sample_rate_hz();
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2019-10-09 13:02:14 +02:00
|
|
|
int AudioProcessingImpl::proc_fullband_sample_rate_hz() const {
|
|
|
|
|
return capture_.capture_fullband_audio
|
|
|
|
|
? capture_.capture_fullband_audio->num_frames() * 100
|
|
|
|
|
: capture_nonlocked_.capture_processing_format.sample_rate_hz();
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-22 21:00:04 +00:00
|
|
|
int AudioProcessingImpl::proc_split_sample_rate_hz() const {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
|
|
|
|
return capture_nonlocked_.split_rate;
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
size_t AudioProcessingImpl::num_reverse_channels() const {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
2016-09-16 15:02:15 -07:00
|
|
|
return formats_.render_processing_format.num_channels();
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
size_t AudioProcessingImpl::num_input_channels() const {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
|
|
|
|
return formats_.api_format.input_stream().num_channels();
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
size_t AudioProcessingImpl::num_proc_channels() const {
|
2016-01-11 20:32:29 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
2019-11-27 09:34:22 +01:00
|
|
|
const bool multi_channel_capture = config_.pipeline.multi_channel_capture &&
|
|
|
|
|
constants_.multi_channel_capture_support;
|
|
|
|
|
if (capture_nonlocked_.echo_controller_enabled && !multi_channel_capture) {
|
2019-09-20 07:50:35 +02:00
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
return num_output_channels();
|
2016-01-11 20:32:29 -08:00
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
size_t AudioProcessingImpl::num_output_channels() const {
|
2015-11-28 12:35:15 -08:00
|
|
|
// Used as callback from submodules, hence locking is not allowed.
|
|
|
|
|
return formats_.api_format.output_stream().num_channels();
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2014-02-12 22:28:31 +00:00
|
|
|
void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock(&mutex_capture_);
|
2021-02-09 08:47:51 +01:00
|
|
|
HandleCaptureOutputUsedSetting(!muted);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::HandleCaptureOutputUsedSetting(
|
|
|
|
|
bool capture_output_used) {
|
2021-03-12 23:08:09 +00:00
|
|
|
capture_.capture_output_used =
|
|
|
|
|
capture_output_used || !constants_.minimize_processing_for_unused_output;
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.agc_manager.get()) {
|
2021-02-09 08:47:51 +01:00
|
|
|
submodules_.agc_manager->HandleCaptureOutputUsedChange(
|
|
|
|
|
capture_.capture_output_used);
|
2014-12-15 09:41:24 +00:00
|
|
|
}
|
2021-03-03 10:52:44 +00:00
|
|
|
if (submodules_.echo_controller) {
|
|
|
|
|
submodules_.echo_controller->SetCaptureOutputUsage(
|
|
|
|
|
capture_.capture_output_used);
|
2021-03-12 14:12:44 +00:00
|
|
|
}
|
|
|
|
|
if (submodules_.noise_suppressor) {
|
|
|
|
|
submodules_.noise_suppressor->SetCaptureOutputUsage(
|
|
|
|
|
capture_.capture_output_used);
|
2021-03-03 10:52:44 +00:00
|
|
|
}
|
2014-02-12 22:28:31 +00:00
|
|
|
}
|
|
|
|
|
|
2018-04-16 12:10:09 +02:00
|
|
|
// Fire-and-forget variant of PostRuntimeSetting: the enqueue result is
// intentionally discarded.
void AudioProcessingImpl::SetRuntimeSetting(RuntimeSetting setting) {
  PostRuntimeSetting(setting);
}
|
|
|
|
|
|
|
|
|
|
// Routes a runtime setting to the capture queue, the render queue, or both,
// depending on its type. Returns false if any required enqueue failed.
bool AudioProcessingImpl::PostRuntimeSetting(RuntimeSetting setting) {
  switch (setting.type()) {
    // Render-side settings.
    case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
    case RuntimeSetting::Type::kPlayoutAudioDeviceChange:
      return render_runtime_settings_enqueuer_.Enqueue(setting);
    // Capture-side settings.
    case RuntimeSetting::Type::kCapturePreGain:
    case RuntimeSetting::Type::kCapturePostGain:
    case RuntimeSetting::Type::kCaptureCompressionGain:
    case RuntimeSetting::Type::kCaptureFixedPostGain:
    case RuntimeSetting::Type::kCaptureOutputUsed:
      return capture_runtime_settings_enqueuer_.Enqueue(setting);
    // Playout volume changes are of interest to both paths; both enqueues
    // are always attempted, and success requires both to succeed.
    case RuntimeSetting::Type::kPlayoutVolumeChange: {
      const bool capture_enqueued =
          capture_runtime_settings_enqueuer_.Enqueue(setting);
      const bool render_enqueued =
          render_runtime_settings_enqueuer_.Enqueue(setting);
      return capture_enqueued && render_enqueued;
    }
    case RuntimeSetting::Type::kNotSpecified:
      RTC_NOTREACHED();
      return true;
  }
  // The language allows the enum to have a non-enumerator
  // value. Check that this doesn't happen.
  RTC_NOTREACHED();
  return true;
}
|
|
|
|
|
|
|
|
|
|
// Wraps a non-owned swap queue of runtime settings.
// NOTE(review): `runtime_settings` is dereferenced in the initializer list
// before the DCHECK runs, so a null argument is UB before the check fires —
// confirm all callers pass a valid pointer.
AudioProcessingImpl::RuntimeSettingEnqueuer::RuntimeSettingEnqueuer(
    SwapQueue<RuntimeSetting>* runtime_settings)
    : runtime_settings_(*runtime_settings) {
  RTC_DCHECK(runtime_settings);
}
|
|
|
|
|
|
|
|
|
|
// Defaulted: the enqueuer does not own the queue it references.
AudioProcessingImpl::RuntimeSettingEnqueuer::~RuntimeSettingEnqueuer() =
    default;
|
|
|
|
|
|
2021-02-09 08:47:51 +01:00
|
|
|
bool AudioProcessingImpl::RuntimeSettingEnqueuer::Enqueue(
|
2018-04-16 12:10:09 +02:00
|
|
|
RuntimeSetting setting) {
|
2021-03-03 10:52:44 +00:00
|
|
|
const bool successful_insert = runtime_settings_.Insert(&setting);
|
|
|
|
|
|
|
|
|
|
if (!successful_insert) {
|
2020-10-23 12:40:30 +02:00
|
|
|
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.ApmRuntimeSettingCannotEnqueue", 1);
|
2018-04-16 12:10:09 +02:00
|
|
|
RTC_LOG(LS_ERROR) << "Cannot enqueue a new runtime setting.";
|
2020-10-23 12:40:30 +02:00
|
|
|
}
|
2021-03-03 10:52:44 +00:00
|
|
|
return successful_insert;
|
2018-04-16 12:10:09 +02:00
|
|
|
}
|
2014-02-12 22:28:31 +00:00
|
|
|
|
2020-01-03 14:54:20 +01:00
|
|
|
int AudioProcessingImpl::MaybeInitializeCapture(
|
|
|
|
|
const StreamConfig& input_config,
|
|
|
|
|
const StreamConfig& output_config) {
|
2015-11-28 12:35:15 -08:00
|
|
|
ProcessingConfig processing_config;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
bool reinitialization_required = false;
|
2015-11-28 12:35:15 -08:00
|
|
|
{
|
2020-01-03 14:54:20 +01:00
|
|
|
// Acquire the capture lock in order to access api_format. The lock is
|
|
|
|
|
// released immediately, as we may need to acquire the render lock as part
|
|
|
|
|
// of the conditional reinitialization.
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock_capture(&mutex_capture_);
|
2015-11-28 12:35:15 -08:00
|
|
|
processing_config = formats_.api_format;
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
reinitialization_required = UpdateActiveSubmoduleStates();
|
2015-11-28 12:35:15 -08:00
|
|
|
}
|
2015-11-16 16:27:42 -08:00
|
|
|
|
2019-05-23 14:28:00 +02:00
|
|
|
if (processing_config.input_stream() != input_config) {
|
|
|
|
|
processing_config.input_stream() = input_config;
|
|
|
|
|
reinitialization_required = true;
|
|
|
|
|
}
|
2015-07-23 11:41:39 -07:00
|
|
|
|
2019-05-23 14:28:00 +02:00
|
|
|
if (processing_config.output_stream() != output_config) {
|
|
|
|
|
processing_config.output_stream() = output_config;
|
|
|
|
|
reinitialization_required = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (reinitialization_required) {
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock_render(&mutex_render_);
|
|
|
|
|
MutexLock lock_capture(&mutex_capture_);
|
2019-05-23 14:28:00 +02:00
|
|
|
RETURN_ON_ERR(InitializeLocked(processing_config));
|
2015-11-28 12:35:15 -08:00
|
|
|
}
|
2020-01-03 14:54:20 +01:00
|
|
|
return kNoError;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int AudioProcessingImpl::ProcessStream(const float* const* src,
|
|
|
|
|
const StreamConfig& input_config,
|
|
|
|
|
const StreamConfig& output_config,
|
|
|
|
|
float* const* dest) {
|
|
|
|
|
TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig");
|
|
|
|
|
if (!src || !dest) {
|
|
|
|
|
return kNullPointerError;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config));
|
2019-05-23 14:28:00 +02:00
|
|
|
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock_capture(&mutex_capture_);
|
2014-03-04 20:58:13 +00:00
|
|
|
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
if (aec_dump_) {
|
|
|
|
|
RecordUnprocessedCaptureStream(src);
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-15 12:15:46 +02:00
|
|
|
capture_.keyboard_info.Extract(src, formats_.api_format.input_stream());
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
|
2019-10-09 13:34:36 +02:00
|
|
|
if (capture_.capture_fullband_audio) {
|
|
|
|
|
capture_.capture_fullband_audio->CopyFrom(
|
|
|
|
|
src, formats_.api_format.input_stream());
|
|
|
|
|
}
|
2016-09-16 15:02:15 -07:00
|
|
|
RETURN_ON_ERR(ProcessCaptureStreamLocked());
|
2019-10-09 13:02:14 +02:00
|
|
|
if (capture_.capture_fullband_audio) {
|
|
|
|
|
capture_.capture_fullband_audio->CopyTo(formats_.api_format.output_stream(),
|
|
|
|
|
dest);
|
|
|
|
|
} else {
|
|
|
|
|
capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
|
|
|
|
|
}
|
2014-03-04 20:58:13 +00:00
|
|
|
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
if (aec_dump_) {
|
|
|
|
|
RecordProcessedCaptureStream(dest);
|
|
|
|
|
}
|
2014-03-04 20:58:13 +00:00
|
|
|
return kNoError;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-15 10:52:28 +02:00
|
|
|
// Drains the capture-side runtime-setting queue and applies each setting to
// the relevant submodule/config field. Runs on the capture thread with the
// capture lock held (presumably — TODO confirm against callers). If the queue
// overran (at least a full queue's worth of settings was processed), falls
// back to a safe state via HandleOverrunInCaptureRuntimeSettingsQueue().
void AudioProcessingImpl::HandleCaptureRuntimeSettings() {
  RuntimeSetting setting;
  int num_settings_processed = 0;
  while (capture_runtime_settings_.Remove(&setting)) {
    // Mirror every received setting into the AEC dump, if recording.
    if (aec_dump_) {
      aec_dump_->WriteRuntimeSetting(setting);
    }
    switch (setting.type()) {
      case RuntimeSetting::Type::kCapturePreGain:
        if (config_.pre_amplifier.enabled ||
            config_.capture_level_adjustment.enabled) {
          float value;
          setting.GetFloat(&value);
          // If the pre-amplifier is used, apply the new gain to the
          // pre-amplifier regardless if the capture level adjustment is
          // activated. This approach allows both functionalities to coexist
          // until they have been properly merged.
          if (config_.pre_amplifier.enabled) {
            config_.pre_amplifier.fixed_gain_factor = value;
          } else {
            config_.capture_level_adjustment.pre_gain_factor = value;
          }

          // Use both the pre-amplifier and the capture level adjustment gains
          // as pre-gains.
          float gain = 1.f;
          if (config_.pre_amplifier.enabled) {
            gain *= config_.pre_amplifier.fixed_gain_factor;
          }
          if (config_.capture_level_adjustment.enabled) {
            gain *= config_.capture_level_adjustment.pre_gain_factor;
          }

          submodules_.capture_levels_adjuster->SetPreGain(gain);
        }
        // TODO(bugs.chromium.org/9138): Log setting handling by Aec Dump.
        break;
      case RuntimeSetting::Type::kCapturePostGain:
        if (config_.capture_level_adjustment.enabled) {
          float value;
          setting.GetFloat(&value);
          config_.capture_level_adjustment.post_gain_factor = value;
          submodules_.capture_levels_adjuster->SetPostGain(
              config_.capture_level_adjustment.post_gain_factor);
        }
        // TODO(bugs.chromium.org/9138): Log setting handling by Aec Dump.
        break;
      case RuntimeSetting::Type::kCaptureCompressionGain: {
        // Only honored when the AGC manager is absent; otherwise the manager
        // owns the compression gain.
        if (!submodules_.agc_manager) {
          float value;
          setting.GetFloat(&value);
          // Round to nearest integer dB.
          int int_value = static_cast<int>(value + .5f);
          config_.gain_controller1.compression_gain_db = int_value;
          if (submodules_.gain_control) {
            int error =
                submodules_.gain_control->set_compression_gain_db(int_value);
            RTC_DCHECK_EQ(kNoError, error);
          }
        }
        break;
      }
      case RuntimeSetting::Type::kCaptureFixedPostGain: {
        if (submodules_.gain_controller2) {
          float value;
          setting.GetFloat(&value);
          config_.gain_controller2.fixed_digital.gain_db = value;
          submodules_.gain_controller2->ApplyConfig(config_.gain_controller2);
        }
        break;
      }
      case RuntimeSetting::Type::kPlayoutVolumeChange: {
        // Cached for echo-path gain-change detection in the capture pipeline.
        int value;
        setting.GetInt(&value);
        capture_.playout_volume = value;
        break;
      }
      case RuntimeSetting::Type::kPlayoutAudioDeviceChange:
        // Render-side setting; must never reach the capture queue.
        RTC_NOTREACHED();
        break;
      case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
        // Render-side setting; must never reach the capture queue.
        RTC_NOTREACHED();
        break;
      case RuntimeSetting::Type::kNotSpecified:
        RTC_NOTREACHED();
        break;
      case RuntimeSetting::Type::kCaptureOutputUsed:
        bool value;
        setting.GetBool(&value);
        HandleCaptureOutputUsedSetting(value);
        break;
    }
    ++num_settings_processed;
  }

  if (num_settings_processed >= RuntimeSettingQueueSize()) {
    // Handle overrun of the runtime settings queue, which has likely
    // caused settings to be discarded.
    HandleOverrunInCaptureRuntimeSettingsQueue();
  }
}
|
|
|
|
|
|
|
|
|
|
// Recovers from a capture runtime-settings queue overrun, where settings may
// have been silently dropped before being applied.
void AudioProcessingImpl::HandleOverrunInCaptureRuntimeSettingsQueue() {
  // Fall back to a safe state for the case when a setting for capture output
  // usage has been missed: re-enable capture output processing.
  HandleCaptureOutputUsedSetting(/*capture_output_used=*/true);
}
|
|
|
|
|
|
|
|
|
|
// Drains the render-side runtime-setting queue. Render-relevant settings are
// forwarded to the render pre-processor (if present); capture-only settings
// reaching this queue indicate a routing bug and trip RTC_NOTREACHED.
void AudioProcessingImpl::HandleRenderRuntimeSettings() {
  RuntimeSetting setting;
  while (render_runtime_settings_.Remove(&setting)) {
    // Mirror every received setting into the AEC dump, if recording.
    if (aec_dump_) {
      aec_dump_->WriteRuntimeSetting(setting);
    }
    switch (setting.type()) {
      case RuntimeSetting::Type::kPlayoutAudioDeviceChange:  // fall-through
      case RuntimeSetting::Type::kPlayoutVolumeChange:       // fall-through
      case RuntimeSetting::Type::kCustomRenderProcessingRuntimeSetting:
        if (submodules_.render_pre_processor) {
          submodules_.render_pre_processor->SetRuntimeSetting(setting);
        }
        break;
      case RuntimeSetting::Type::kCapturePreGain:          // fall-through
      case RuntimeSetting::Type::kCapturePostGain:         // fall-through
      case RuntimeSetting::Type::kCaptureCompressionGain:  // fall-through
      case RuntimeSetting::Type::kCaptureFixedPostGain:    // fall-through
      case RuntimeSetting::Type::kCaptureOutputUsed:       // fall-through
      case RuntimeSetting::Type::kNotSpecified:
        // Capture-side (or unset) settings must never be posted to the
        // render queue.
        RTC_NOTREACHED();
        break;
    }
  }
}
|
|
|
|
|
|
2017-05-15 07:19:21 -07:00
|
|
|
// Packs band-split render audio and queues it for the capture-side consumers
// that operate on banded data (AECM and the legacy gain control). Runs on the
// render thread; the queues hand the data over to the capture thread.
void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) {
  RTC_DCHECK_GE(160, audio->num_frames_per_band());

  if (submodules_.echo_control_mobile) {
    EchoControlMobileImpl::PackRenderAudioBuffer(audio, num_output_channels(),
                                                 num_reverse_channels(),
                                                 &aecm_render_queue_buffer_);
    RTC_DCHECK(aecm_render_signal_queue_);
    // Insert the samples into the queue.
    if (!aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_)) {
      // The data queue is full and needs to be emptied.
      EmptyQueuedRenderAudio();

      // Retry the insert (should always work).
      bool result =
          aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_);
      RTC_DCHECK(result);
    }
  }

  // When the AGC manager is active it owns the render path, so the raw gain
  // control queue is only fed in its absence.
  if (!submodules_.agc_manager && submodules_.gain_control) {
    GainControlImpl::PackRenderAudioBuffer(*audio, &agc_render_queue_buffer_);
    // Insert the samples into the queue.
    if (!agc_render_signal_queue_->Insert(&agc_render_queue_buffer_)) {
      // The data queue is full and needs to be emptied.
      EmptyQueuedRenderAudio();

      // Retry the insert (should always work).
      bool result = agc_render_signal_queue_->Insert(&agc_render_queue_buffer_);
      RTC_DCHECK(result);
    }
  }
}
|
2016-10-28 05:39:16 -07:00
|
|
|
|
2017-05-15 07:19:21 -07:00
|
|
|
// Packs full-band render audio and queues it for the residual echo detector
// on the capture side. Runs on the render thread.
void AudioProcessingImpl::QueueNonbandedRenderAudio(AudioBuffer* audio) {
  ResidualEchoDetector::PackRenderAudioBuffer(audio, &red_render_queue_buffer_);

  // Insert the samples into the queue.
  if (!red_render_signal_queue_->Insert(&red_render_queue_buffer_)) {
    // The data queue is full and needs to be emptied.
    EmptyQueuedRenderAudio();

    // Retry the insert (should always work).
    bool result = red_render_signal_queue_->Insert(&red_render_queue_buffer_);
    RTC_DCHECK(result);
  }
}
|
|
|
|
|
|
|
|
|
|
// (Re)allocates the render-to-capture swap queues (AGC and residual echo
// detector) so their element sizes fit the maximum amount of data that will
// be queued. If the existing elements are already large enough, the queue is
// merely cleared instead of reallocated.
void AudioProcessingImpl::AllocateRenderQueue() {
  const size_t new_agc_render_queue_element_max_size =
      std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerBand);

  const size_t new_red_render_queue_element_max_size =
      std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerFrame);

  // Reallocate the queues if the queue item sizes are too small to fit the
  // data to put in the queues.

  if (agc_render_queue_element_max_size_ <
      new_agc_render_queue_element_max_size) {
    agc_render_queue_element_max_size_ = new_agc_render_queue_element_max_size;

    std::vector<int16_t> template_queue_element(
        agc_render_queue_element_max_size_);

    agc_render_signal_queue_.reset(
        new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
            kMaxNumFramesToBuffer, template_queue_element,
            RenderQueueItemVerifier<int16_t>(
                agc_render_queue_element_max_size_)));

    // Scratch buffers used on the producer (render) and consumer (capture)
    // sides must match the queue element size.
    agc_render_queue_buffer_.resize(agc_render_queue_element_max_size_);
    agc_capture_queue_buffer_.resize(agc_render_queue_element_max_size_);
  } else {
    agc_render_signal_queue_->Clear();
  }

  if (red_render_queue_element_max_size_ <
      new_red_render_queue_element_max_size) {
    red_render_queue_element_max_size_ = new_red_render_queue_element_max_size;

    std::vector<float> template_queue_element(
        red_render_queue_element_max_size_);

    red_render_signal_queue_.reset(
        new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>(
            kMaxNumFramesToBuffer, template_queue_element,
            RenderQueueItemVerifier<float>(
                red_render_queue_element_max_size_)));

    red_render_queue_buffer_.resize(red_render_queue_element_max_size_);
    red_capture_queue_buffer_.resize(red_render_queue_element_max_size_);
  } else {
    red_render_signal_queue_->Clear();
  }
}
|
|
|
|
|
|
|
|
|
|
// Acquires the capture lock and drains all queued render audio into the
// capture-side submodules. Safe to call from the render thread.
void AudioProcessingImpl::EmptyQueuedRenderAudio() {
  MutexLock lock_capture(&mutex_capture_);
  EmptyQueuedRenderAudioLocked();
}
|
|
|
|
|
|
|
|
|
|
// Drains the render-audio queues into their capture-side consumers (AECM,
// gain control, residual echo detector). Caller must hold mutex_capture_.
void AudioProcessingImpl::EmptyQueuedRenderAudioLocked() {
  if (submodules_.echo_control_mobile) {
    RTC_DCHECK(aecm_render_signal_queue_);
    while (aecm_render_signal_queue_->Remove(&aecm_capture_queue_buffer_)) {
      submodules_.echo_control_mobile->ProcessRenderAudio(
          aecm_capture_queue_buffer_);
    }
  }

  if (submodules_.gain_control) {
    while (agc_render_signal_queue_->Remove(&agc_capture_queue_buffer_)) {
      submodules_.gain_control->ProcessRenderAudio(agc_capture_queue_buffer_);
    }
  }

  // The residual-echo-detector queue is drained unconditionally; the detector
  // itself is expected to exist whenever data was queued.
  while (red_render_signal_queue_->Remove(&red_capture_queue_buffer_)) {
    RTC_DCHECK(submodules_.echo_detector);
    submodules_.echo_detector->AnalyzeRenderAudio(red_capture_queue_buffer_);
  }
}
|
|
|
|
|
|
2020-03-16 12:06:02 +01:00
|
|
|
// Fixed-point (int16 interleaved) capture-path entry point. Copies `src` into
// the internal buffers, runs the capture pipeline, and writes the processed
// audio to `dest` per `output_config`. Returns kNoError on success or an
// APM error code. Records unprocessed/processed audio to the AEC dump when
// one is attached.
int AudioProcessingImpl::ProcessStream(const int16_t* const src,
                                       const StreamConfig& input_config,
                                       const StreamConfig& output_config,
                                       int16_t* const dest) {
  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_AudioFrame");
  // Reinitialize first (takes its own locks) before acquiring the capture
  // lock for the actual processing.
  RETURN_ON_ERR(MaybeInitializeCapture(input_config, output_config));

  MutexLock lock_capture(&mutex_capture_);

  if (aec_dump_) {
    RecordUnprocessedCaptureStream(src, input_config);
  }

  capture_.capture_audio->CopyFrom(src, input_config);
  // The optional fullband buffer carries the signal at the full API rate,
  // bypassing the (possibly downsampled) multiband processing rate.
  if (capture_.capture_fullband_audio) {
    capture_.capture_fullband_audio->CopyFrom(src, input_config);
  }
  RETURN_ON_ERR(ProcessCaptureStreamLocked());
  // Only copy back to `dest` if some processing actually ran; otherwise the
  // output is left untouched.
  if (submodule_states_.CaptureMultiBandProcessingPresent() ||
      submodule_states_.CaptureFullBandProcessingActive()) {
    if (capture_.capture_fullband_audio) {
      capture_.capture_fullband_audio->CopyTo(output_config, dest);
    } else {
      capture_.capture_audio->CopyTo(output_config, dest);
    }
  }

  if (aec_dump_) {
    RecordProcessedCaptureStream(dest, output_config);
  }
  return kNoError;
}
|
|
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
int AudioProcessingImpl::ProcessCaptureStreamLocked() {
|
2020-05-14 14:31:18 +02:00
|
|
|
EmptyQueuedRenderAudioLocked();
|
2018-05-15 10:52:28 +02:00
|
|
|
HandleCaptureRuntimeSettings();
|
2018-04-16 12:10:09 +02:00
|
|
|
|
2016-03-15 09:34:24 -07:00
|
|
|
// Ensure that not both the AEC and AECM are active at the same time.
|
2018-07-23 14:48:07 +00:00
|
|
|
// TODO(peah): Simplify once the public API Enable functions for these
|
|
|
|
|
// are moved to APM.
|
2019-12-23 10:22:08 +01:00
|
|
|
RTC_DCHECK_LE(
|
|
|
|
|
!!submodules_.echo_controller + !!submodules_.echo_control_mobile, 1);
|
2016-03-15 09:34:24 -07:00
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity.
|
2019-12-20 00:42:22 +01:00
|
|
|
AudioBuffer* linear_aec_buffer = capture_.linear_aec_output.get();
|
2015-08-14 10:35:55 -07:00
|
|
|
|
2019-12-10 13:04:15 +01:00
|
|
|
if (submodules_.high_pass_filter &&
|
|
|
|
|
config_.high_pass_filter.apply_in_full_band &&
|
|
|
|
|
!constants_.enforce_split_band_hpf) {
|
|
|
|
|
submodules_.high_pass_filter->Process(capture_buffer,
|
|
|
|
|
/*use_split_band_data=*/false);
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-15 16:31:04 +00:00
|
|
|
if (submodules_.capture_levels_adjuster) {
|
|
|
|
|
// If the analog mic gain emulation is active, get the emulated analog mic
|
|
|
|
|
// gain and pass it to the analog gain control functionality.
|
|
|
|
|
if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
|
|
|
|
|
int level = submodules_.capture_levels_adjuster->GetAnalogMicGainLevel();
|
|
|
|
|
if (submodules_.agc_manager) {
|
|
|
|
|
submodules_.agc_manager->set_stream_analog_level(level);
|
|
|
|
|
} else if (submodules_.gain_control) {
|
|
|
|
|
int error = submodules_.gain_control->set_stream_analog_level(level);
|
|
|
|
|
RTC_DCHECK_EQ(kNoError, error);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
submodules_.capture_levels_adjuster->ApplyPreLevelAdjustment(
|
|
|
|
|
*capture_buffer);
|
2018-04-16 16:31:22 +02:00
|
|
|
}
|
|
|
|
|
|
2019-08-20 09:19:21 +02:00
|
|
|
capture_input_rms_.Analyze(rtc::ArrayView<const float>(
|
2019-08-22 11:51:13 +02:00
|
|
|
capture_buffer->channels_const()[0],
|
2016-11-29 08:09:09 -08:00
|
|
|
capture_nonlocked_.capture_processing_format.num_frames()));
|
2016-12-20 13:45:58 -08:00
|
|
|
const bool log_rms = ++capture_rms_interval_counter_ >= 1000;
|
|
|
|
|
if (log_rms) {
|
|
|
|
|
capture_rms_interval_counter_ = 0;
|
|
|
|
|
RmsLevel::Levels levels = capture_input_rms_.AverageAndPeak();
|
2016-12-06 04:28:04 -08:00
|
|
|
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelAverageRms",
|
|
|
|
|
levels.average, 1, RmsLevel::kMinLevelDb, 64);
|
|
|
|
|
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelPeakRms",
|
|
|
|
|
levels.peak, 1, RmsLevel::kMinLevelDb, 64);
|
2016-11-29 08:09:09 -08:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.echo_controller) {
|
2018-07-16 17:08:41 +02:00
|
|
|
// Detect and flag any change in the analog gain.
|
2020-05-14 14:31:18 +02:00
|
|
|
int analog_mic_level = recommended_stream_analog_level_locked();
|
2018-07-16 17:08:41 +02:00
|
|
|
capture_.echo_path_gain_change =
|
|
|
|
|
capture_.prev_analog_mic_level != analog_mic_level &&
|
|
|
|
|
capture_.prev_analog_mic_level != -1;
|
|
|
|
|
capture_.prev_analog_mic_level = analog_mic_level;
|
|
|
|
|
|
2021-03-15 16:31:04 +00:00
|
|
|
// Detect and flag any change in the capture level adjustment pre-gain.
|
|
|
|
|
if (submodules_.capture_levels_adjuster) {
|
|
|
|
|
float pre_adjustment_gain =
|
|
|
|
|
submodules_.capture_levels_adjuster->GetPreAdjustmentGain();
|
2018-10-02 17:00:59 +02:00
|
|
|
capture_.echo_path_gain_change =
|
|
|
|
|
capture_.echo_path_gain_change ||
|
2021-03-15 16:31:04 +00:00
|
|
|
(capture_.prev_pre_adjustment_gain != pre_adjustment_gain &&
|
|
|
|
|
capture_.prev_pre_adjustment_gain >= 0.f);
|
|
|
|
|
capture_.prev_pre_adjustment_gain = pre_adjustment_gain;
|
2018-10-02 17:00:59 +02:00
|
|
|
}
|
2019-05-10 15:50:02 +02:00
|
|
|
|
|
|
|
|
// Detect volume change.
|
|
|
|
|
capture_.echo_path_gain_change =
|
|
|
|
|
capture_.echo_path_gain_change ||
|
|
|
|
|
(capture_.prev_playout_volume != capture_.playout_volume &&
|
|
|
|
|
capture_.prev_playout_volume >= 0);
|
|
|
|
|
capture_.prev_playout_volume = capture_.playout_volume;
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.echo_controller->AnalyzeCapture(capture_buffer);
|
2016-12-14 01:16:23 -08:00
|
|
|
}
|
|
|
|
|
|
2020-01-13 14:43:13 +01:00
|
|
|
if (submodules_.agc_manager) {
|
2019-11-22 12:11:40 +01:00
|
|
|
submodules_.agc_manager->AnalyzePreProcess(capture_buffer);
|
2014-12-15 09:41:24 +00:00
|
|
|
}
|
|
|
|
|
|
The audio processing module (APM) relies on two for
functionalities doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether to analysis and synthesis is
needed for the bandsplitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that
is no longer neccessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
if (submodule_states_.CaptureMultiBandSubModulesActive() &&
|
|
|
|
|
SampleRateSupportsMultiBand(
|
2016-09-16 15:02:15 -07:00
|
|
|
capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
|
|
|
|
|
capture_buffer->SplitIntoFrequencyBands();
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2019-11-27 09:34:22 +01:00
|
|
|
const bool multi_channel_capture = config_.pipeline.multi_channel_capture &&
|
|
|
|
|
constants_.multi_channel_capture_support;
|
|
|
|
|
if (submodules_.echo_controller && !multi_channel_capture) {
|
2017-02-23 05:16:26 -08:00
|
|
|
// Force down-mixing of the number of channels after the detection of
|
|
|
|
|
// capture signal saturation.
|
|
|
|
|
// TODO(peah): Look into ensuring that this kind of tampering with the
|
|
|
|
|
// AudioBuffer functionality should not be needed.
|
|
|
|
|
capture_buffer->set_num_channels(1);
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-10 13:04:15 +01:00
|
|
|
if (submodules_.high_pass_filter &&
|
|
|
|
|
(!config_.high_pass_filter.apply_in_full_band ||
|
|
|
|
|
constants_.enforce_split_band_hpf)) {
|
|
|
|
|
submodules_.high_pass_filter->Process(capture_buffer,
|
|
|
|
|
/*use_split_band_data=*/true);
|
2016-11-22 07:24:52 -08:00
|
|
|
}
|
2019-10-29 22:59:44 +01:00
|
|
|
|
2020-01-13 14:43:13 +01:00
|
|
|
if (submodules_.gain_control) {
|
|
|
|
|
RETURN_ON_ERR(
|
|
|
|
|
submodules_.gain_control->AnalyzeCaptureAudio(*capture_buffer));
|
|
|
|
|
}
|
2019-12-20 00:42:22 +01:00
|
|
|
|
2020-01-30 07:40:58 +01:00
|
|
|
if ((!config_.noise_suppression.analyze_linear_aec_output_when_available ||
|
|
|
|
|
!linear_aec_buffer || submodules_.echo_control_mobile) &&
|
|
|
|
|
submodules_.noise_suppressor) {
|
|
|
|
|
submodules_.noise_suppressor->Analyze(*capture_buffer);
|
2019-10-16 11:46:11 +02:00
|
|
|
}
|
2016-03-15 09:34:24 -07:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.echo_control_mobile) {
|
2019-04-29 12:14:50 +02:00
|
|
|
// Ensure that the stream delay was set before the call to the
|
|
|
|
|
// AECM ProcessCaptureAudio function.
|
2020-01-17 10:55:09 +01:00
|
|
|
if (!capture_.was_stream_delay_set) {
|
2019-04-29 12:14:50 +02:00
|
|
|
return AudioProcessing::kStreamParameterNotSetError;
|
|
|
|
|
}
|
2018-02-12 21:42:56 +01:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.noise_suppressor) {
|
2019-10-29 22:59:44 +01:00
|
|
|
submodules_.noise_suppressor->Process(capture_buffer);
|
2018-04-18 09:35:13 +02:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
RETURN_ON_ERR(submodules_.echo_control_mobile->ProcessCaptureAudio(
|
2017-02-06 03:39:42 -08:00
|
|
|
capture_buffer, stream_delay_ms()));
|
2019-04-29 12:14:50 +02:00
|
|
|
} else {
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.echo_controller) {
|
2019-04-29 12:14:50 +02:00
|
|
|
data_dumper_->DumpRaw("stream_delay", stream_delay_ms());
|
2016-12-14 01:16:23 -08:00
|
|
|
|
2020-01-17 10:55:09 +01:00
|
|
|
if (capture_.was_stream_delay_set) {
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.echo_controller->SetAudioBufferDelay(stream_delay_ms());
|
2019-04-29 12:14:50 +02:00
|
|
|
}
|
2016-03-15 04:32:28 -07:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.echo_controller->ProcessCapture(
|
2019-11-13 11:12:29 +01:00
|
|
|
capture_buffer, linear_aec_buffer, capture_.echo_path_gain_change);
|
2019-04-29 12:14:50 +02:00
|
|
|
}
|
|
|
|
|
|
2019-12-20 00:42:22 +01:00
|
|
|
if (config_.noise_suppression.analyze_linear_aec_output_when_available &&
|
2020-01-30 07:40:58 +01:00
|
|
|
linear_aec_buffer && submodules_.noise_suppressor) {
|
|
|
|
|
submodules_.noise_suppressor->Analyze(*linear_aec_buffer);
|
2019-12-20 00:42:22 +01:00
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.noise_suppressor) {
|
2019-10-29 22:59:44 +01:00
|
|
|
submodules_.noise_suppressor->Process(capture_buffer);
|
2019-10-16 11:46:11 +02:00
|
|
|
}
|
2017-06-07 10:08:10 +02:00
|
|
|
}
|
2016-10-28 05:39:16 -07:00
|
|
|
|
2018-12-21 16:29:27 +01:00
|
|
|
if (config_.voice_detection.enabled) {
|
|
|
|
|
capture_.stats.voice_detected =
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.voice_detector->ProcessCaptureAudio(capture_buffer);
|
2018-12-21 16:29:27 +01:00
|
|
|
} else {
|
|
|
|
|
capture_.stats.voice_detected = absl::nullopt;
|
|
|
|
|
}
|
2014-12-15 09:41:24 +00:00
|
|
|
|
2020-01-13 14:43:13 +01:00
|
|
|
if (submodules_.agc_manager) {
|
2019-11-22 12:11:40 +01:00
|
|
|
submodules_.agc_manager->Process(capture_buffer);
|
|
|
|
|
|
|
|
|
|
absl::optional<int> new_digital_gain =
|
|
|
|
|
submodules_.agc_manager->GetDigitalComressionGain();
|
2020-01-13 14:43:13 +01:00
|
|
|
if (new_digital_gain && submodules_.gain_control) {
|
2019-11-22 12:11:40 +01:00
|
|
|
submodules_.gain_control->set_compression_gain_db(*new_digital_gain);
|
|
|
|
|
}
|
2014-12-15 09:41:24 +00:00
|
|
|
}
|
2020-01-13 14:43:13 +01:00
|
|
|
|
|
|
|
|
if (submodules_.gain_control) {
|
|
|
|
|
// TODO(peah): Add reporting from AEC3 whether there is echo.
|
|
|
|
|
RETURN_ON_ERR(submodules_.gain_control->ProcessCaptureAudio(
|
|
|
|
|
capture_buffer, /*stream_has_echo*/ false));
|
|
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2021-03-11 11:40:46 +00:00
|
|
|
if (submodule_states_.CaptureMultiBandProcessingPresent() &&
|
|
|
|
|
SampleRateSupportsMultiBand(
|
|
|
|
|
capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
|
|
|
|
|
capture_buffer->MergeFrequencyBands();
|
|
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
capture_.stats.output_rms_dbfs = absl::nullopt;
|
|
|
|
|
if (capture_.capture_output_used) {
|
|
|
|
|
if (capture_.capture_fullband_audio) {
|
|
|
|
|
const auto& ec = submodules_.echo_controller;
|
|
|
|
|
bool ec_active = ec ? ec->ActiveProcessing() : false;
|
|
|
|
|
// Only update the fullband buffer if the multiband processing has changed
|
|
|
|
|
// the signal. Keep the original signal otherwise.
|
|
|
|
|
if (submodule_states_.CaptureMultiBandProcessingActive(ec_active)) {
|
|
|
|
|
capture_buffer->CopyTo(capture_.capture_fullband_audio.get());
|
|
|
|
|
}
|
|
|
|
|
capture_buffer = capture_.capture_fullband_audio.get();
|
2019-10-09 13:34:36 +02:00
|
|
|
}
|
2019-10-09 13:02:14 +02:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
if (config_.residual_echo_detector.enabled) {
|
|
|
|
|
RTC_DCHECK(submodules_.echo_detector);
|
|
|
|
|
submodules_.echo_detector->AnalyzeCaptureAudio(
|
|
|
|
|
rtc::ArrayView<const float>(capture_buffer->channels()[0],
|
|
|
|
|
capture_buffer->num_frames()));
|
|
|
|
|
}
|
2017-05-15 07:19:21 -07:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
// TODO(aluebs): Investigate if the transient suppression placement should
|
|
|
|
|
// be before or after the AGC.
|
|
|
|
|
if (submodules_.transient_suppressor) {
|
|
|
|
|
float voice_probability =
|
|
|
|
|
submodules_.agc_manager.get()
|
|
|
|
|
? submodules_.agc_manager->voice_probability()
|
|
|
|
|
: 1.f;
|
|
|
|
|
|
|
|
|
|
submodules_.transient_suppressor->Suppress(
|
|
|
|
|
capture_buffer->channels()[0], capture_buffer->num_frames(),
|
|
|
|
|
capture_buffer->num_channels(),
|
|
|
|
|
capture_buffer->split_bands_const(0)[kBand0To8kHz],
|
|
|
|
|
capture_buffer->num_frames_per_band(),
|
|
|
|
|
capture_.keyboard_info.keyboard_data,
|
|
|
|
|
capture_.keyboard_info.num_keyboard_frames, voice_probability,
|
|
|
|
|
capture_.key_pressed);
|
|
|
|
|
}
|
2014-12-15 09:41:24 +00:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
// Experimental APM sub-module that analyzes |capture_buffer|.
|
|
|
|
|
if (submodules_.capture_analyzer) {
|
|
|
|
|
submodules_.capture_analyzer->Analyze(capture_buffer);
|
|
|
|
|
}
|
2014-12-15 09:41:24 +00:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
if (submodules_.gain_controller2) {
|
|
|
|
|
submodules_.gain_controller2->NotifyAnalogLevel(
|
|
|
|
|
recommended_stream_analog_level_locked());
|
|
|
|
|
submodules_.gain_controller2->Process(capture_buffer);
|
|
|
|
|
}
|
2018-08-29 10:37:09 +02:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
if (submodules_.capture_post_processor) {
|
|
|
|
|
submodules_.capture_post_processor->Process(capture_buffer);
|
|
|
|
|
}
|
2017-05-22 06:57:06 -07:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
// The level estimator operates on the recombined data.
|
|
|
|
|
if (config_.level_estimation.enabled) {
|
|
|
|
|
submodules_.output_level_estimator->ProcessStream(*capture_buffer);
|
|
|
|
|
capture_.stats.output_rms_dbfs =
|
|
|
|
|
submodules_.output_level_estimator->RMS();
|
|
|
|
|
}
|
2017-09-25 12:04:02 +02:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
capture_output_rms_.Analyze(rtc::ArrayView<const float>(
|
|
|
|
|
capture_buffer->channels_const()[0],
|
|
|
|
|
capture_nonlocked_.capture_processing_format.num_frames()));
|
|
|
|
|
if (log_rms) {
|
|
|
|
|
RmsLevel::Levels levels = capture_output_rms_.AverageAndPeak();
|
|
|
|
|
RTC_HISTOGRAM_COUNTS_LINEAR(
|
|
|
|
|
"WebRTC.Audio.ApmCaptureOutputLevelAverageRms", levels.average, 1,
|
|
|
|
|
RmsLevel::kMinLevelDb, 64);
|
|
|
|
|
RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelPeakRms",
|
|
|
|
|
levels.peak, 1, RmsLevel::kMinLevelDb, 64);
|
|
|
|
|
}
|
2014-03-04 20:58:13 +00:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
if (submodules_.agc_manager) {
|
|
|
|
|
int level = recommended_stream_analog_level_locked();
|
|
|
|
|
data_dumper_->DumpRaw("experimental_gain_control_stream_analog_level", 1,
|
|
|
|
|
&level);
|
|
|
|
|
}
|
2016-12-20 13:45:58 -08:00
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
// Compute echo-detector stats.
|
|
|
|
|
if (config_.residual_echo_detector.enabled) {
|
|
|
|
|
RTC_DCHECK(submodules_.echo_detector);
|
|
|
|
|
auto ed_metrics = submodules_.echo_detector->GetMetrics();
|
|
|
|
|
capture_.stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
|
|
|
|
|
capture_.stats.residual_echo_likelihood_recent_max =
|
|
|
|
|
ed_metrics.echo_likelihood_recent_max;
|
|
|
|
|
}
|
2019-11-18 08:52:22 +01:00
|
|
|
}
|
|
|
|
|
|
2021-03-12 23:08:09 +00:00
|
|
|
// Compute echo-controller stats.
|
2019-12-30 14:32:14 +01:00
|
|
|
if (submodules_.echo_controller) {
|
|
|
|
|
auto ec_metrics = submodules_.echo_controller->GetMetrics();
|
|
|
|
|
capture_.stats.echo_return_loss = ec_metrics.echo_return_loss;
|
|
|
|
|
capture_.stats.echo_return_loss_enhancement =
|
|
|
|
|
ec_metrics.echo_return_loss_enhancement;
|
|
|
|
|
capture_.stats.delay_ms = ec_metrics.delay_ms;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Pass stats for reporting.
|
|
|
|
|
stats_reporter_.UpdateStatistics(capture_.stats);
|
|
|
|
|
|
2021-03-15 16:31:04 +00:00
|
|
|
if (submodules_.capture_levels_adjuster) {
|
|
|
|
|
submodules_.capture_levels_adjuster->ApplyPostLevelAdjustment(
|
|
|
|
|
*capture_buffer);
|
|
|
|
|
|
|
|
|
|
// If the analog mic gain emulation is active, retrieve the level from the
|
|
|
|
|
// analog gain control and set it to mic gain emulator.
|
|
|
|
|
if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
|
|
|
|
|
if (submodules_.agc_manager) {
|
|
|
|
|
submodules_.capture_levels_adjuster->SetAnalogMicGainLevel(
|
|
|
|
|
submodules_.agc_manager->stream_analog_level());
|
|
|
|
|
} else if (submodules_.gain_control) {
|
|
|
|
|
submodules_.capture_levels_adjuster->SetAnalogMicGainLevel(
|
|
|
|
|
submodules_.gain_control->stream_analog_level());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-12 14:18:36 +00:00
|
|
|
// Temporarily set the output to zero after the stream has been unmuted
|
|
|
|
|
// (capture output is again used). The purpose of this is to avoid clicks and
|
|
|
|
|
// artefacts in the audio that results when the processing again is
|
|
|
|
|
// reactivated after unmuting.
|
|
|
|
|
if (!capture_.capture_output_used_last_frame &&
|
|
|
|
|
capture_.capture_output_used) {
|
|
|
|
|
for (size_t ch = 0; ch < capture_buffer->num_channels(); ++ch) {
|
|
|
|
|
rtc::ArrayView<float> channel_view(capture_buffer->channels()[ch],
|
|
|
|
|
capture_buffer->num_frames());
|
|
|
|
|
std::fill(channel_view.begin(), channel_view.end(), 0.f);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
capture_.capture_output_used_last_frame = capture_.capture_output_used;
|
|
|
|
|
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_.was_stream_delay_set = false;
|
2014-03-04 20:58:13 +00:00
|
|
|
return kNoError;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-22 15:21:31 +02:00
|
|
|
// Analyzes a render-side (far-end) frame without producing output audio.
// Thin wrapper: takes the render lock and delegates to the locked variant,
// using the same config for both the input and (unused) output streams.
int AudioProcessingImpl::AnalyzeReverseStream(
    const float* const* data,
    const StreamConfig& reverse_config) {
  TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_StreamConfig");
  MutexLock lock(&mutex_render_);
  return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
}
|
|
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
// Processes a render-side (far-end) frame in deinterleaved float format and
// writes the result to `dest`. Returns kNoError or an APM error code.
int AudioProcessingImpl::ProcessReverseStream(const float* const* src,
                                              const StreamConfig& input_config,
                                              const StreamConfig& output_config,
                                              float* const* dest) {
  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig");
  MutexLock lock(&mutex_render_);
  RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, input_config, output_config));
  if (submodule_states_.RenderMultiBandProcessingActive() ||
      submodule_states_.RenderFullBandProcessingActive()) {
    // Render processing modified the signal: copy the processed audio (with
    // implicit resampling to the output format) out of the render buffer.
    render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
                                 dest);
  } else if (formats_.api_format.reverse_input_stream() !=
             formats_.api_format.reverse_output_stream()) {
    // Nothing modified the signal but the output format differs from the
    // input format: convert directly from `src`.
    render_.render_converter->Convert(src, input_config.num_samples(), dest,
                                      output_config.num_samples());
  } else {
    // Pass-through; copies only if `src` and `dest` are distinct buffers.
    CopyAudioIfNeeded(src, input_config.num_frames(),
                      input_config.num_channels(), dest);
  }

  return kNoError;
}
|
|
|
|
|
|
2015-11-28 12:35:15 -08:00
|
|
|
// Validates the render-side input, reinitializes the render path if the
// stream formats changed, optionally logs the frame to the AEC dump, and
// feeds the audio into the render processing chain.
// Requires mutex_render_ to be held by the caller.
int AudioProcessingImpl::AnalyzeReverseStreamLocked(
    const float* const* src,
    const StreamConfig& input_config,
    const StreamConfig& output_config) {
  if (src == nullptr) {
    return kNullPointerError;
  }

  if (input_config.num_channels() == 0) {
    return kBadNumberChannelsError;
  }

  // Reinitialize the render path if the supplied configs differ from the
  // currently cached API format.
  ProcessingConfig processing_config = formats_.api_format;
  processing_config.reverse_input_stream() = input_config;
  processing_config.reverse_output_stream() = output_config;

  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
  RTC_DCHECK_EQ(input_config.num_frames(),
                formats_.api_format.reverse_input_stream().num_frames());

  // Log the unprocessed render frame before it enters the processing chain.
  if (aec_dump_) {
    const size_t channel_size =
        formats_.api_format.reverse_input_stream().num_frames();
    const size_t num_channels =
        formats_.api_format.reverse_input_stream().num_channels();
    aec_dump_->WriteRenderStreamMessage(
        AudioFrameView<const float>(src, num_channels, channel_size));
  }
  render_.render_audio->CopyFrom(src,
                                 formats_.api_format.reverse_input_stream());
  return ProcessRenderStreamLocked();
}
|
|
|
|
|
|
2020-03-16 12:06:02 +01:00
|
|
|
// Processes a render-side (far-end) frame in interleaved int16 format and,
// if the render chain modified the signal, writes the result to `dest`.
int AudioProcessingImpl::ProcessReverseStream(const int16_t* const src,
                                              const StreamConfig& input_config,
                                              const StreamConfig& output_config,
                                              int16_t* const dest) {
  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");

  if (input_config.num_channels() <= 0) {
    return AudioProcessing::Error::kBadNumberChannelsError;
  }

  MutexLock lock(&mutex_render_);
  // Update only rate and channel count of the cached reverse-stream formats;
  // reinitialization below happens only if anything actually changed.
  ProcessingConfig processing_config = formats_.api_format;
  processing_config.reverse_input_stream().set_sample_rate_hz(
      input_config.sample_rate_hz());
  processing_config.reverse_input_stream().set_num_channels(
      input_config.num_channels());
  processing_config.reverse_output_stream().set_sample_rate_hz(
      output_config.sample_rate_hz());
  processing_config.reverse_output_stream().set_num_channels(
      output_config.num_channels());

  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
  if (input_config.num_frames() !=
      formats_.api_format.reverse_input_stream().num_frames()) {
    return kBadDataLengthError;
  }

  // Log the unprocessed render frame before it enters the processing chain.
  if (aec_dump_) {
    aec_dump_->WriteRenderStreamMessage(src, input_config.num_frames(),
                                        input_config.num_channels());
  }

  render_.render_audio->CopyFrom(src, input_config);
  RETURN_ON_ERR(ProcessRenderStreamLocked());
  if (submodule_states_.RenderMultiBandProcessingActive() ||
      submodule_states_.RenderFullBandProcessingActive()) {
    // Only copy out when the render processing changed the signal; otherwise
    // `dest` is left untouched by this overload.
    render_.render_audio->CopyTo(output_config, dest);
  }
  return kNoError;
}
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2016-09-16 15:02:15 -07:00
|
|
|
// Runs the render-side processing chain on render_.render_audio:
// runtime settings -> pre-processor -> queuing for capture-side submodules ->
// band split -> echo-controller analysis -> band merge.
// Requires mutex_render_ to be held by the caller.
int AudioProcessingImpl::ProcessRenderStreamLocked() {
  AudioBuffer* render_buffer = render_.render_audio.get();  // For brevity.

  HandleRenderRuntimeSettings();

  if (submodules_.render_pre_processor) {
    submodules_.render_pre_processor->Process(render_buffer);
  }

  // Queue fullband render audio for capture-side consumers.
  QueueNonbandedRenderAudio(render_buffer);

  if (submodule_states_.RenderMultiBandSubModulesActive() &&
      SampleRateSupportsMultiBand(
          formats_.render_processing_format.sample_rate_hz())) {
    render_buffer->SplitIntoFrequencyBands();
  }

  if (submodule_states_.RenderMultiBandSubModulesActive()) {
    QueueBandedRenderAudio(render_buffer);
  }

  // TODO(peah): Perform the queuing inside QueueRenderAudio().
  if (submodules_.echo_controller) {
    submodules_.echo_controller->AnalyzeRender(render_buffer);
  }

  if (submodule_states_.RenderMultiBandProcessingActive() &&
      SampleRateSupportsMultiBand(
          formats_.render_processing_format.sample_rate_hz())) {
    render_buffer->MergeFrequencyBands();
  }

  return kNoError;
}
|
|
|
|
|
|
|
|
|
|
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock(&mutex_capture_);
|
2012-05-29 21:14:06 +00:00
|
|
|
Error retval = kNoError;
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_.was_stream_delay_set = true;
|
2012-03-06 19:03:39 +00:00
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
if (delay < 0) {
|
2012-05-29 21:14:06 +00:00
|
|
|
delay = 0;
|
|
|
|
|
retval = kBadStreamParameterWarning;
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// TODO(ajm): the max is rather arbitrarily chosen; investigate.
|
|
|
|
|
if (delay > 500) {
|
2012-05-29 21:14:06 +00:00
|
|
|
delay = 500;
|
|
|
|
|
retval = kBadStreamParameterWarning;
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2015-11-28 12:35:15 -08:00
|
|
|
capture_nonlocked_.stream_delay_ms = delay;
|
2012-05-29 21:14:06 +00:00
|
|
|
return retval;
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2019-11-13 11:12:29 +01:00
|
|
|
// Copies the linear AEC output into `linear_output`, converting samples from
// the internal floating-point S16 range to [-1, 1]. Returns false (after
// logging and hitting a debug check) if no linear AEC output buffer has been
// allocated, i.e. if the linear output was not enabled.
bool AudioProcessingImpl::GetLinearAecOutput(
    rtc::ArrayView<std::array<float, 160>> linear_output) const {
  MutexLock lock(&mutex_capture_);
  AudioBuffer* linear_aec_buffer = capture_.linear_aec_output.get();

  RTC_DCHECK(linear_aec_buffer);
  if (linear_aec_buffer) {
    // The linear AEC output is single-band and must match the caller-supplied
    // view in both channel count and frames per channel.
    RTC_DCHECK_EQ(1, linear_aec_buffer->num_bands());
    RTC_DCHECK_EQ(linear_output.size(), linear_aec_buffer->num_channels());

    for (size_t ch = 0; ch < linear_aec_buffer->num_channels(); ++ch) {
      RTC_DCHECK_EQ(linear_output[ch].size(), linear_aec_buffer->num_frames());
      rtc::ArrayView<const float> channel_view =
          rtc::ArrayView<const float>(linear_aec_buffer->channels_const()[ch],
                                      linear_aec_buffer->num_frames());
      // Convert from FloatS16 sample values to the [-1, 1] float range.
      FloatS16ToFloat(channel_view.data(), channel_view.size(),
                      linear_output[ch].data());
    }
    return true;
  }
  RTC_LOG(LS_ERROR) << "No linear AEC output available";
  RTC_NOTREACHED();
  return false;
}
|
|
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
// Returns the last capture-path delay set via set_stream_delay_ms()
// (already clamped there to [0, 500] ms).
int AudioProcessingImpl::stream_delay_ms() const {
  // Used as callback from submodules, hence locking is not allowed.
  return capture_nonlocked_.stream_delay_ms;
}
|
|
|
|
|
|
2014-03-04 20:58:13 +00:00
|
|
|
// Caches the key-press state; it is consumed by the transient suppressor
// during capture-side processing.
void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  MutexLock lock(&mutex_capture_);
  capture_.key_pressed = key_pressed;
}
|
|
|
|
|
|
2019-03-27 13:28:08 +01:00
|
|
|
// Forwards the applied analog mic gain level to whichever analog gain
// functionality is active, in priority order: internal analog-gain emulation
// (cache only), AGC manager, then legacy gain control. If none is active, the
// level is cached so it can be echoed back by
// recommended_stream_analog_level_locked().
void AudioProcessingImpl::set_stream_analog_level(int level) {
  MutexLock lock_capture(&mutex_capture_);

  if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
    // If the analog mic gain is emulated internally, simply cache the level
    // for later reporting back as the recommended stream analog level to use.
    capture_.cached_stream_analog_level_ = level;
    return;
  }

  if (submodules_.agc_manager) {
    submodules_.agc_manager->set_stream_analog_level(level);
    data_dumper_->DumpRaw("experimental_gain_control_set_stream_analog_level",
                          1, &level);
    return;
  }

  if (submodules_.gain_control) {
    int error = submodules_.gain_control->set_stream_analog_level(level);
    RTC_DCHECK_EQ(kNoError, error);
    return;
  }

  // If no analog mic gain control functionality is in place, cache the level
  // for later reporting back as the recommended stream analog level to use.
  capture_.cached_stream_analog_level_ = level;
}
|
|
|
|
|
|
|
|
|
|
// Thread-safe accessor: acquires the capture lock and delegates to the
// _locked() variant.
int AudioProcessingImpl::recommended_stream_analog_level() const {
  MutexLock lock_capture(&mutex_capture_);
  return recommended_stream_analog_level_locked();
}
|
|
|
|
|
|
|
|
|
|
// Returns the analog mic gain level to recommend to the caller, mirroring the
// priority order used in set_stream_analog_level(): emulated analog gain
// (cached level), AGC manager, legacy gain control, and finally the cached
// level when no analog gain functionality is active.
// Requires mutex_capture_ to be held by the caller.
int AudioProcessingImpl::recommended_stream_analog_level_locked() const {
  if (config_.capture_level_adjustment.analog_mic_gain_emulation.enabled) {
    return capture_.cached_stream_analog_level_;
  }

  if (submodules_.agc_manager) {
    return submodules_.agc_manager->stream_analog_level();
  }

  if (submodules_.gain_control) {
    return submodules_.gain_control->stream_analog_level();
  }

  // No analog gain control is active: echo back the last level that was set.
  return capture_.cached_stream_analog_level_;
}
|
|
|
|
|
|
2020-05-11 11:03:47 +02:00
|
|
|
bool AudioProcessingImpl::CreateAndAttachAecDump(const std::string& file_name,
|
|
|
|
|
int64_t max_log_size_bytes,
|
|
|
|
|
rtc::TaskQueue* worker_queue) {
|
|
|
|
|
std::unique_ptr<AecDump> aec_dump =
|
|
|
|
|
AecDumpFactory::Create(file_name, max_log_size_bytes, worker_queue);
|
|
|
|
|
if (!aec_dump) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
AttachAecDump(std::move(aec_dump));
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool AudioProcessingImpl::CreateAndAttachAecDump(FILE* handle,
|
|
|
|
|
int64_t max_log_size_bytes,
|
|
|
|
|
rtc::TaskQueue* worker_queue) {
|
|
|
|
|
std::unique_ptr<AecDump> aec_dump =
|
|
|
|
|
AecDumpFactory::Create(handle, max_log_size_bytes, worker_queue);
|
|
|
|
|
if (!aec_dump) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
AttachAecDump(std::move(aec_dump));
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring are to move file I/O operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc,
and to remove some of the preprocessor directives. These goals will be
achieved by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
// Takes ownership of `aec_dump` and makes it the active dump target.
// Immediately writes a forced config message and an init message so the dump
// is self-describing from its first frame.
// Thread-safety: acquires the render lock before the capture lock, matching
// the lock order used elsewhere in this file (e.g. GetConfig, DetachAecDump).
void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) {
  RTC_DCHECK(aec_dump);
  MutexLock lock_render(&mutex_render_);
  MutexLock lock_capture(&mutex_capture_);

  // The previously attached AecDump will be destroyed with the
  // 'aec_dump' parameter, which is after locks are released.
  // (swap, rather than assignment, keeps the old dump alive in the local
  // parameter so its potentially blocking destructor runs outside the locks.)
  aec_dump_.swap(aec_dump);
  // `true` forces the config message to be written even if the config is
  // unchanged since the last write.
  WriteAecDumpConfigMessage(true);
  aec_dump_->WriteInitMessage(formats_.api_format, rtc::TimeUTCMillis());
}
|
|
|
|
|
|
|
|
|
|
// Detaches and destroys the currently attached AecDump, if any. Safe to call
// when no dump is attached (moving a null unique_ptr is a no-op).
void AudioProcessingImpl::DetachAecDump() {
  // The d-tor of a task-queue based AecDump blocks until all pending
  // tasks are done. This construction avoids blocking while holding
  // the render and capture locks.
  std::unique_ptr<AecDump> aec_dump = nullptr;
  {
    // Same lock order as AttachAecDump: render before capture.
    MutexLock lock_render(&mutex_render_);
    MutexLock lock_capture(&mutex_capture_);
    aec_dump = std::move(aec_dump_);
  }
  // `aec_dump` is destroyed here, after both locks have been released.
}
|
|
|
|
|
|
2016-11-22 07:24:52 -08:00
|
|
|
// Returns a copy of the current configuration. Both locks are held (render
// first, then capture — the file-wide lock order) so the snapshot is
// consistent with respect to concurrent ApplyConfig calls.
AudioProcessing::Config AudioProcessingImpl::GetConfig() const {
  MutexLock lock_render(&mutex_render_);
  MutexLock lock_capture(&mutex_capture_);
  return config_;
}
|
|
|
|
|
|
The audio processing module (APM) relies on two functionalities
for doing sample-rate conversions:
-The implicit resampling done in the AudioBuffer CopyTo,
CopyFrom, InterleaveTo and DeinterleaveFrom methods.
-The multi-band splitting scheme.
The selection of rates in these have been difficult and
complicated, partly due to that the APM API which allows
for activating the APM submodules without notifying
the APM.
This CL adds functionality that for each capture frame
polls all submodules for whether they are active or not
and compares this against a cached result.
Furthermore, new functionality is added that based on the
results of the comparison do a reinitialization of the APM.
This has several advantages
-The code deciding on whether analysis and synthesis are
needed for the band-splitting can be much simplified and
centralized.
-The selection of the processing rate can be done such as
to avoid the implicit resampling that was in some cases
unnecessarily done.
-The optimization for whether an output copy is needed
that was done to improve performance due to the implicit
resampling is no longer needed, which simplifies the
code and makes it less error-prone in the sense that it
is no longer necessary to keep track of whether any
module has changed the signal.
Finally, it should be noted that the polling of the state
for all the submodules was done previously as well, but in
a less obvious and distributed manner.
BUG=webrtc:6181, webrtc:6220, webrtc:5298, webrtc:6296, webrtc:6298, webrtc:6297
Review-Url: https://codereview.webrtc.org/2304123002
Cr-Commit-Position: refs/heads/master@{#14175}
2016-09-10 04:42:27 -07:00
|
|
|
// Polls which submodules are currently active (either via config flags or via
// the existence of the corresponding submodule object) and feeds that state to
// `submodule_states_`. The arguments are positional — their order must match
// the Update() signature exactly, so take care when adding/removing flags.
// Returns whatever submodule_states_.Update() reports — presumably whether
// the active-submodule set changed (confirm against the declaration).
bool AudioProcessingImpl::UpdateActiveSubmoduleStates() {
  return submodule_states_.Update(
      config_.high_pass_filter.enabled, !!submodules_.echo_control_mobile,
      config_.residual_echo_detector.enabled, !!submodules_.noise_suppressor,
      !!submodules_.gain_control, !!submodules_.gain_controller2,
      config_.pre_amplifier.enabled || config_.capture_level_adjustment.enabled,
      capture_nonlocked_.echo_controller_enabled,
      config_.voice_detection.enabled, !!submodules_.transient_suppressor);
}
|
|
|
|
|
|
2020-01-02 15:15:36 +01:00
|
|
|
void AudioProcessingImpl::InitializeTransientSuppressor() {
|
|
|
|
|
if (config_.transient_suppression.enabled) {
|
2020-04-01 15:24:40 +02:00
|
|
|
// Attempt to create a transient suppressor, if one is not already created.
|
2020-01-02 15:15:36 +01:00
|
|
|
if (!submodules_.transient_suppressor) {
|
2020-04-27 08:39:33 +02:00
|
|
|
submodules_.transient_suppressor =
|
|
|
|
|
CreateTransientSuppressor(submodule_creation_overrides_);
|
2020-04-01 15:24:40 +02:00
|
|
|
}
|
|
|
|
|
if (submodules_.transient_suppressor) {
|
|
|
|
|
submodules_.transient_suppressor->Initialize(
|
|
|
|
|
proc_fullband_sample_rate_hz(), capture_nonlocked_.split_rate,
|
|
|
|
|
num_proc_channels());
|
|
|
|
|
} else {
|
|
|
|
|
RTC_LOG(LS_WARNING)
|
|
|
|
|
<< "No transient suppressor created (probably disabled)";
|
2014-12-15 09:41:24 +00:00
|
|
|
}
|
2020-01-02 15:15:36 +01:00
|
|
|
} else {
|
|
|
|
|
submodules_.transient_suppressor.reset();
|
2014-12-15 09:41:24 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 14:27:14 +01:00
|
|
|
void AudioProcessingImpl::InitializeHighPassFilter(bool forced_reset) {
|
2019-12-04 08:34:12 +01:00
|
|
|
bool high_pass_filter_needed_by_aec =
|
|
|
|
|
config_.echo_canceller.enabled &&
|
|
|
|
|
config_.echo_canceller.enforce_high_pass_filtering &&
|
|
|
|
|
!config_.echo_canceller.mobile_mode;
|
|
|
|
|
if (submodule_states_.HighPassFilteringRequired() ||
|
|
|
|
|
high_pass_filter_needed_by_aec) {
|
2019-12-10 13:04:15 +01:00
|
|
|
bool use_full_band = config_.high_pass_filter.apply_in_full_band &&
|
|
|
|
|
!constants_.enforce_split_band_hpf;
|
|
|
|
|
int rate = use_full_band ? proc_fullband_sample_rate_hz()
|
|
|
|
|
: proc_split_sample_rate_hz();
|
|
|
|
|
size_t num_channels =
|
|
|
|
|
use_full_band ? num_output_channels() : num_proc_channels();
|
|
|
|
|
|
2020-01-03 14:27:14 +01:00
|
|
|
if (!submodules_.high_pass_filter ||
|
|
|
|
|
rate != submodules_.high_pass_filter->sample_rate_hz() ||
|
|
|
|
|
forced_reset ||
|
|
|
|
|
num_channels != submodules_.high_pass_filter->num_channels()) {
|
|
|
|
|
submodules_.high_pass_filter.reset(
|
|
|
|
|
new HighPassFilter(rate, num_channels));
|
|
|
|
|
}
|
2016-11-22 07:24:52 -08:00
|
|
|
} else {
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.high_pass_filter.reset();
|
2016-11-22 07:24:52 -08:00
|
|
|
}
|
|
|
|
|
}
|
2017-05-22 06:57:06 -07:00
|
|
|
|
2019-10-07 14:03:56 +02:00
|
|
|
void AudioProcessingImpl::InitializeVoiceDetector() {
|
|
|
|
|
if (config_.voice_detection.enabled) {
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.voice_detector = std::make_unique<VoiceDetection>(
|
2019-10-07 14:03:56 +02:00
|
|
|
proc_split_sample_rate_hz(), VoiceDetection::kVeryLowLikelihood);
|
|
|
|
|
} else {
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.voice_detector.reset();
|
2019-10-07 14:03:56 +02:00
|
|
|
}
|
|
|
|
|
}
|
2017-10-14 08:28:46 +02:00
|
|
|
// Selects and (re)creates the echo-cancellation submodule based on the config:
//  - an injected echo controller (echo_control_factory_), or
//  - the built-in EchoCanceller3 (full AEC, non-mobile mode), or
//  - EchoControlMobileImpl (AECM) with its render-signal swap queue, or
//  - nothing, when echo cancellation is disabled.
// Exactly one of the three submodules is active after this call; the others
// (and their auxiliary state) are reset.
void AudioProcessingImpl::InitializeEchoController() {
  // An injected factory always wins; otherwise the full AEC is used when
  // enabled and not in mobile mode.
  bool use_echo_controller =
      echo_control_factory_ ||
      (config_.echo_canceller.enabled && !config_.echo_canceller.mobile_mode);

  if (use_echo_controller) {
    // Create and activate the echo controller.
    if (echo_control_factory_) {
      submodules_.echo_controller = echo_control_factory_->Create(
          proc_sample_rate_hz(), num_reverse_channels(), num_proc_channels());
      RTC_DCHECK(submodules_.echo_controller);
    } else {
      // Optionally start from a setup-specific default AEC3 config tuned for
      // the current channel layout.
      EchoCanceller3Config config =
          use_setup_specific_default_aec3_config_
              ? EchoCanceller3::CreateDefaultConfig(num_reverse_channels(),
                                                    num_proc_channels())
              : EchoCanceller3Config();
      submodules_.echo_controller = std::make_unique<EchoCanceller3>(
          config, proc_sample_rate_hz(), num_reverse_channels(),
          num_proc_channels());
    }

    // Setup the storage for returning the linear AEC output.
    if (config_.echo_canceller.export_linear_aec_output) {
      // The linear output is always exported at 16 kHz.
      constexpr int kLinearOutputRateHz = 16000;
      capture_.linear_aec_output = std::make_unique<AudioBuffer>(
          kLinearOutputRateHz, num_proc_channels(), kLinearOutputRateHz,
          num_proc_channels(), kLinearOutputRateHz, num_proc_channels());
    } else {
      capture_.linear_aec_output.reset();
    }

    capture_nonlocked_.echo_controller_enabled = true;

    // The full echo controller replaces AECM; clear its state.
    submodules_.echo_control_mobile.reset();
    aecm_render_signal_queue_.reset();
    return;
  }

  // No full echo controller: tear down its state.
  submodules_.echo_controller.reset();
  capture_nonlocked_.echo_controller_enabled = false;
  capture_.linear_aec_output.reset();

  if (!config_.echo_canceller.enabled) {
    submodules_.echo_control_mobile.reset();
    aecm_render_signal_queue_.reset();
    return;
  }

  if (config_.echo_canceller.mobile_mode) {
    // Create and activate AECM.
    // Size queue elements for the worst case; at least one sample.
    size_t max_element_size =
        std::max(static_cast<size_t>(1),
                 kMaxAllowedValuesOfSamplesPerBand *
                     EchoControlMobileImpl::NumCancellersRequired(
                         num_output_channels(), num_reverse_channels()));

    std::vector<int16_t> template_queue_element(max_element_size);

    // Lock-free queue carrying render audio to the capture side for AECM.
    aecm_render_signal_queue_.reset(
        new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
            kMaxNumFramesToBuffer, template_queue_element,
            RenderQueueItemVerifier<int16_t>(max_element_size)));

    // Scratch buffers reused on each render/capture exchange.
    aecm_render_queue_buffer_.resize(max_element_size);
    aecm_capture_queue_buffer_.resize(max_element_size);

    submodules_.echo_control_mobile.reset(new EchoControlMobileImpl());

    submodules_.echo_control_mobile->Initialize(proc_split_sample_rate_hz(),
                                                num_reverse_channels(),
                                                num_output_channels());
    return;
  }

  // Neither full AEC nor AECM is active.
  submodules_.echo_control_mobile.reset();
  aecm_render_signal_queue_.reset();
}
|
2016-11-22 07:24:52 -08:00
|
|
|
|
2020-01-13 14:43:13 +01:00
|
|
|
// (Re)creates and configures AGC1. When the analog gain controller is
// disabled, only the digital GainControlImpl is configured from the config;
// otherwise an AgcManagerDirect drives it, preserving the analog stream level
// across re-creation.
void AudioProcessingImpl::InitializeGainController1() {
  if (!config_.gain_controller1.enabled) {
    submodules_.agc_manager.reset();
    submodules_.gain_control.reset();
    return;
  }

  if (!submodules_.gain_control) {
    submodules_.gain_control.reset(new GainControlImpl());
  }

  submodules_.gain_control->Initialize(num_proc_channels(),
                                       proc_sample_rate_hz());

  if (!config_.gain_controller1.analog_gain_controller.enabled) {
    // Digital-only mode: apply the configured mode/levels directly. The
    // setters validate their inputs; failures are programming errors, hence
    // the DCHECKs.
    int error = submodules_.gain_control->set_mode(
        Agc1ConfigModeToInterfaceMode(config_.gain_controller1.mode));
    RTC_DCHECK_EQ(kNoError, error);
    error = submodules_.gain_control->set_target_level_dbfs(
        config_.gain_controller1.target_level_dbfs);
    RTC_DCHECK_EQ(kNoError, error);
    error = submodules_.gain_control->set_compression_gain_db(
        config_.gain_controller1.compression_gain_db);
    RTC_DCHECK_EQ(kNoError, error);
    error = submodules_.gain_control->enable_limiter(
        config_.gain_controller1.enable_limiter);
    RTC_DCHECK_EQ(kNoError, error);
    error = submodules_.gain_control->set_analog_level_limits(
        config_.gain_controller1.analog_level_minimum,
        config_.gain_controller1.analog_level_maximum);
    RTC_DCHECK_EQ(kNoError, error);

    submodules_.agc_manager.reset();
    return;
  }

  // Analog mode: (re)create the AGC manager if absent or if its channel
  // count / sample rate no longer matches the processing format.
  if (!submodules_.agc_manager.get() ||
      submodules_.agc_manager->num_channels() !=
          static_cast<int>(num_proc_channels()) ||
      submodules_.agc_manager->sample_rate_hz() !=
          capture_nonlocked_.split_rate) {
    int stream_analog_level = -1;
    const bool re_creation = !!submodules_.agc_manager;
    if (re_creation) {
      // Carry the current analog level over to the new manager so the mic
      // volume does not jump on re-creation.
      stream_analog_level = submodules_.agc_manager->stream_analog_level();
    }
    submodules_.agc_manager.reset(new AgcManagerDirect(
        num_proc_channels(),
        config_.gain_controller1.analog_gain_controller.startup_min_volume,
        config_.gain_controller1.analog_gain_controller.clipped_level_min,
        !config_.gain_controller1.analog_gain_controller
             .enable_digital_adaptive,
        capture_nonlocked_.split_rate));
    if (re_creation) {
      submodules_.agc_manager->set_stream_analog_level(stream_analog_level);
    }
  }
  submodules_.agc_manager->Initialize();
  // Let the manager configure the digital gain control it steers.
  submodules_.agc_manager->SetupDigitalGainControl(
      submodules_.gain_control.get());
  submodules_.agc_manager->HandleCaptureOutputUsedChange(
      capture_.capture_output_used);
}
|
|
|
|
|
|
2017-05-22 06:57:06 -07:00
|
|
|
void AudioProcessingImpl::InitializeGainController2() {
|
2017-10-13 11:05:17 +02:00
|
|
|
if (config_.gain_controller2.enabled) {
|
2020-01-03 10:36:34 +01:00
|
|
|
if (!submodules_.gain_controller2) {
|
|
|
|
|
// TODO(alessiob): Move the injected gain controller once injection is
|
|
|
|
|
// implemented.
|
|
|
|
|
submodules_.gain_controller2.reset(new GainController2());
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.gain_controller2->Initialize(proc_fullband_sample_rate_hz());
|
2020-01-03 10:36:34 +01:00
|
|
|
submodules_.gain_controller2->ApplyConfig(config_.gain_controller2);
|
|
|
|
|
} else {
|
|
|
|
|
submodules_.gain_controller2.reset();
|
2017-05-22 06:57:06 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-16 11:46:11 +02:00
|
|
|
void AudioProcessingImpl::InitializeNoiseSuppressor() {
|
2019-10-29 22:59:44 +01:00
|
|
|
submodules_.noise_suppressor.reset();
|
|
|
|
|
|
2019-10-16 11:46:11 +02:00
|
|
|
if (config_.noise_suppression.enabled) {
|
2020-04-01 15:24:40 +02:00
|
|
|
auto map_level =
|
|
|
|
|
[](AudioProcessing::Config::NoiseSuppression::Level level) {
|
|
|
|
|
using NoiseSuppresionConfig =
|
|
|
|
|
AudioProcessing::Config::NoiseSuppression;
|
|
|
|
|
switch (level) {
|
|
|
|
|
case NoiseSuppresionConfig::kLow:
|
|
|
|
|
return NsConfig::SuppressionLevel::k6dB;
|
|
|
|
|
case NoiseSuppresionConfig::kModerate:
|
|
|
|
|
return NsConfig::SuppressionLevel::k12dB;
|
|
|
|
|
case NoiseSuppresionConfig::kHigh:
|
|
|
|
|
return NsConfig::SuppressionLevel::k18dB;
|
|
|
|
|
case NoiseSuppresionConfig::kVeryHigh:
|
|
|
|
|
return NsConfig::SuppressionLevel::k21dB;
|
|
|
|
|
}
|
2020-11-08 00:49:37 +01:00
|
|
|
RTC_CHECK_NOTREACHED();
|
2020-04-01 15:24:40 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
NsConfig cfg;
|
|
|
|
|
cfg.target_level = map_level(config_.noise_suppression.level);
|
|
|
|
|
submodules_.noise_suppressor = std::make_unique<NoiseSuppressor>(
|
|
|
|
|
cfg, proc_sample_rate_hz(), num_proc_channels());
|
2019-10-16 11:46:11 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-15 16:31:04 +00:00
|
|
|
void AudioProcessingImpl::InitializeCaptureLevelsAdjuster() {
|
|
|
|
|
if (config_.pre_amplifier.enabled ||
|
|
|
|
|
config_.capture_level_adjustment.enabled) {
|
|
|
|
|
// Use both the pre-amplifier and the capture level adjustment gains as
|
|
|
|
|
// pre-gains.
|
|
|
|
|
float pre_gain = 1.f;
|
|
|
|
|
if (config_.pre_amplifier.enabled) {
|
|
|
|
|
pre_gain *= config_.pre_amplifier.fixed_gain_factor;
|
|
|
|
|
}
|
|
|
|
|
if (config_.capture_level_adjustment.enabled) {
|
|
|
|
|
pre_gain *= config_.capture_level_adjustment.pre_gain_factor;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
submodules_.capture_levels_adjuster =
|
|
|
|
|
std::make_unique<CaptureLevelsAdjuster>(
|
|
|
|
|
config_.capture_level_adjustment.analog_mic_gain_emulation.enabled,
|
|
|
|
|
config_.capture_level_adjustment.analog_mic_gain_emulation
|
|
|
|
|
.initial_level,
|
|
|
|
|
pre_gain, config_.capture_level_adjustment.post_gain_factor);
|
2018-04-16 16:31:22 +02:00
|
|
|
} else {
|
2021-03-15 16:31:04 +00:00
|
|
|
submodules_.capture_levels_adjuster.reset();
|
2018-04-16 16:31:22 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-28 05:39:16 -07:00
|
|
|
// Initializes the (always-present) echo detector for the current capture and
// render sample rates. The hard-coded channel counts of 1 suggest the
// detector analyzes a single channel per stream — confirm against the
// EchoDetector interface.
void AudioProcessingImpl::InitializeResidualEchoDetector() {
  RTC_DCHECK(submodules_.echo_detector);
  submodules_.echo_detector->Initialize(
      proc_fullband_sample_rate_hz(), 1,
      formats_.render_processing_format.sample_rate_hz(), 1);
}
|
|
|
|
|
|
2018-08-29 10:37:09 +02:00
|
|
|
void AudioProcessingImpl::InitializeAnalyzer() {
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.capture_analyzer) {
|
|
|
|
|
submodules_.capture_analyzer->Initialize(proc_fullband_sample_rate_hz(),
|
|
|
|
|
num_proc_channels());
|
2018-08-29 10:37:09 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-25 12:04:02 +02:00
|
|
|
void AudioProcessingImpl::InitializePostProcessor() {
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.capture_post_processor) {
|
|
|
|
|
submodules_.capture_post_processor->Initialize(
|
2019-10-09 13:02:14 +02:00
|
|
|
proc_fullband_sample_rate_hz(), num_proc_channels());
|
2017-09-25 12:04:02 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-18 16:02:40 +01:00
|
|
|
void AudioProcessingImpl::InitializePreProcessor() {
|
2019-10-18 13:29:43 +02:00
|
|
|
if (submodules_.render_pre_processor) {
|
|
|
|
|
submodules_.render_pre_processor->Initialize(
|
2017-12-18 16:02:40 +01:00
|
|
|
formats_.render_processing_format.sample_rate_hz(),
|
|
|
|
|
formats_.render_processing_format.num_channels());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring are to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc,
and to remove some of the preprocessor directives. These goals will be
achieved by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
void AudioProcessingImpl::WriteAecDumpConfigMessage(bool forced) {
|
|
|
|
|
if (!aec_dump_) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2019-04-25 15:18:06 +02:00
|
|
|
|
|
|
|
|
std::string experiments_description = "";
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
// TODO(peah): Add semicolon-separated concatenations of experiment
|
|
|
|
|
// descriptions for other submodules.
|
2020-01-13 14:43:13 +01:00
|
|
|
if (config_.gain_controller1.analog_gain_controller.clipped_level_min !=
|
|
|
|
|
kClippedLevelMin) {
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
experiments_description += "AgcClippingLevelExperiment;";
|
|
|
|
|
}
|
2020-02-18 14:50:28 +01:00
|
|
|
if (!!submodules_.capture_post_processor) {
|
|
|
|
|
experiments_description += "CapturePostProcessor;";
|
|
|
|
|
}
|
|
|
|
|
if (!!submodules_.render_pre_processor) {
|
|
|
|
|
experiments_description += "RenderPreProcessor;";
|
|
|
|
|
}
|
2017-10-16 13:49:04 +02:00
|
|
|
if (capture_nonlocked_.echo_controller_enabled) {
|
|
|
|
|
experiments_description += "EchoController;";
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
}
|
2017-10-13 11:05:17 +02:00
|
|
|
if (config_.gain_controller2.enabled) {
|
|
|
|
|
experiments_description += "GainController2;";
|
|
|
|
|
}
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
|
|
|
|
InternalAPMConfig apm_config;
|
|
|
|
|
|
2019-03-06 04:16:46 +01:00
|
|
|
apm_config.aec_enabled = config_.echo_canceller.enabled;
|
2019-12-09 10:18:44 +01:00
|
|
|
apm_config.aec_delay_agnostic_enabled = false;
|
|
|
|
|
apm_config.aec_extended_filter_enabled = false;
|
|
|
|
|
apm_config.aec_suppression_level = 0;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
2019-10-18 13:29:43 +02:00
|
|
|
apm_config.aecm_enabled = !!submodules_.echo_control_mobile;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
apm_config.aecm_comfort_noise_enabled =
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.echo_control_mobile &&
|
|
|
|
|
submodules_.echo_control_mobile->is_comfort_noise_enabled();
|
2019-04-29 12:14:50 +02:00
|
|
|
apm_config.aecm_routing_mode =
|
2019-10-18 13:29:43 +02:00
|
|
|
submodules_.echo_control_mobile
|
|
|
|
|
? static_cast<int>(submodules_.echo_control_mobile->routing_mode())
|
2019-04-29 12:14:50 +02:00
|
|
|
: 0;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
2020-01-13 14:43:13 +01:00
|
|
|
apm_config.agc_enabled = !!submodules_.gain_control;
|
|
|
|
|
|
|
|
|
|
apm_config.agc_mode = submodules_.gain_control
|
|
|
|
|
? static_cast<int>(submodules_.gain_control->mode())
|
|
|
|
|
: GainControl::kAdaptiveAnalog;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
apm_config.agc_limiter_enabled =
|
2020-01-13 14:43:13 +01:00
|
|
|
submodules_.gain_control ? submodules_.gain_control->is_limiter_enabled()
|
|
|
|
|
: false;
|
2019-11-18 08:52:22 +01:00
|
|
|
apm_config.noise_robust_agc_enabled = !!submodules_.agc_manager;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
|
|
|
|
apm_config.hpf_enabled = config_.high_pass_filter.enabled;
|
|
|
|
|
|
2019-10-16 11:46:11 +02:00
|
|
|
apm_config.ns_enabled = config_.noise_suppression.enabled;
|
|
|
|
|
apm_config.ns_level = static_cast<int>(config_.noise_suppression.level);
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
|
|
|
|
apm_config.transient_suppression_enabled =
|
2020-01-02 15:15:36 +01:00
|
|
|
config_.transient_suppression.enabled;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
apm_config.experiments_description = experiments_description;
|
2018-04-16 13:52:32 +02:00
|
|
|
apm_config.pre_amplifier_enabled = config_.pre_amplifier.enabled;
|
|
|
|
|
apm_config.pre_amplifier_fixed_gain_factor =
|
|
|
|
|
config_.pre_amplifier.fixed_gain_factor;
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
|
|
|
|
|
if (!forced && apm_config == apm_config_for_aec_dump_) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
aec_dump_->WriteConfig(apm_config);
|
|
|
|
|
apm_config_for_aec_dump_ = apm_config;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::RecordUnprocessedCaptureStream(
|
|
|
|
|
const float* const* src) {
|
|
|
|
|
RTC_DCHECK(aec_dump_);
|
|
|
|
|
WriteAecDumpConfigMessage(false);
|
|
|
|
|
|
|
|
|
|
const size_t channel_size = formats_.api_format.input_stream().num_frames();
|
|
|
|
|
const size_t num_channels = formats_.api_format.input_stream().num_channels();
|
|
|
|
|
aec_dump_->AddCaptureStreamInput(
|
2018-02-16 11:54:07 +01:00
|
|
|
AudioFrameView<const float>(src, num_channels, channel_size));
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
RecordAudioProcessingState();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::RecordUnprocessedCaptureStream(
|
2020-03-16 12:06:02 +01:00
|
|
|
const int16_t* const data,
|
|
|
|
|
const StreamConfig& config) {
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
RTC_DCHECK(aec_dump_);
|
|
|
|
|
WriteAecDumpConfigMessage(false);
|
|
|
|
|
|
2020-03-16 12:06:02 +01:00
|
|
|
aec_dump_->AddCaptureStreamInput(data, config.num_channels(),
|
|
|
|
|
config.num_frames());
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
RecordAudioProcessingState();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::RecordProcessedCaptureStream(
|
|
|
|
|
const float* const* processed_capture_stream) {
|
|
|
|
|
RTC_DCHECK(aec_dump_);
|
|
|
|
|
|
|
|
|
|
const size_t channel_size = formats_.api_format.output_stream().num_frames();
|
|
|
|
|
const size_t num_channels =
|
|
|
|
|
formats_.api_format.output_stream().num_channels();
|
2018-02-16 11:54:07 +01:00
|
|
|
aec_dump_->AddCaptureStreamOutput(AudioFrameView<const float>(
|
|
|
|
|
processed_capture_stream, num_channels, channel_size));
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
aec_dump_->WriteCaptureStreamMessage();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::RecordProcessedCaptureStream(
|
2020-03-16 12:06:02 +01:00
|
|
|
const int16_t* const data,
|
|
|
|
|
const StreamConfig& config) {
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
RTC_DCHECK(aec_dump_);
|
|
|
|
|
|
2020-03-16 12:06:02 +01:00
|
|
|
aec_dump_->AddCaptureStreamOutput(data, config.num_channels(),
|
2020-03-18 21:59:52 +01:00
|
|
|
config.num_frames());
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
aec_dump_->WriteCaptureStreamMessage();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::RecordAudioProcessingState() {
|
|
|
|
|
RTC_DCHECK(aec_dump_);
|
|
|
|
|
AecDump::AudioProcessingState audio_proc_state;
|
|
|
|
|
audio_proc_state.delay = capture_nonlocked_.stream_delay_ms;
|
2019-12-09 10:18:44 +01:00
|
|
|
audio_proc_state.drift = 0;
|
2020-05-14 14:31:18 +02:00
|
|
|
audio_proc_state.level = recommended_stream_analog_level_locked();
|
AudioProcessingModule has a feature to make a recording of its
configuration, inputs and outputs over a period of time. It is
activated by AudioProcessing::StartRecording. The data is stored in
binary protobuf format in a specified file. The file IO is, as of
this CL, done from the real-time audio thread.
This CL contains an interface for AecDump, a new APM submodule that
will handle the recordings. Calls to the new interface from the
AudioProcessingModule are added. These calls have no effect, and for a
short while, audio_processing_impl.cc will contain two copies of
recording calls.
The original calls are guarded by the WEBRTC_AUDIOPROC_DEBUG_DUMP
preprocessor define. They still have an effect, while the new ones do
not. In the following CLs, the old recording calls will be removed,
and an implementation of AecDump added.
The reasons for the refactoring is to move file IO operations from the
real-time audio thread, to add a top-level low-priority task queue for
logging tasks like this, to simplify and modularize audio_processing_impl.cc
and remove some of the preprocessor directives. These goals will be
archived by the upcoming CLs. The implementation is in
https://codereview.webrtc.org/2865113002.
BUG=webrtc:7404
Review-Url: https://codereview.webrtc.org/2778783002
Cr-Commit-Position: refs/heads/master@{#18233}
2017-05-23 07:20:05 -07:00
|
|
|
audio_proc_state.keypress = capture_.key_pressed;
|
|
|
|
|
aec_dump_->AddAudioProcessingState(audio_proc_state);
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-02 15:15:36 +01:00
|
|
|
// Capture-side state. Levels and volumes start at -1, meaning "not yet
// observed"; sample rates default to 16 kHz until configured.
AudioProcessingImpl::ApmCaptureState::ApmCaptureState()
    : was_stream_delay_set(false),
      capture_output_used(true),
      capture_output_used_last_frame(true),
      key_pressed(false),
      capture_processing_format(kSampleRate16kHz),
      split_rate(kSampleRate16kHz),
      echo_path_gain_change(false),
      prev_analog_mic_level(-1),
      prev_pre_adjustment_gain(-1.f),
      playout_volume(-1),
      prev_playout_volume(-1) {}

AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default;
|
|
|
|
|
|
2019-08-15 12:15:46 +02:00
|
|
|
void AudioProcessingImpl::ApmCaptureState::KeyboardInfo::Extract(
|
|
|
|
|
const float* const* data,
|
|
|
|
|
const StreamConfig& stream_config) {
|
|
|
|
|
if (stream_config.has_keyboard()) {
|
|
|
|
|
keyboard_data = data[stream_config.num_channels()];
|
|
|
|
|
} else {
|
|
|
|
|
keyboard_data = NULL;
|
|
|
|
|
}
|
|
|
|
|
num_keyboard_frames = stream_config.num_frames();
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-29 14:46:07 -07:00
|
|
|
// Render-side state has no non-default initialization.
AudioProcessingImpl::ApmRenderState::ApmRenderState() = default;

AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default;
|
|
|
|
|
|
2019-12-30 14:32:14 +01:00
|
|
|
AudioProcessingImpl::ApmStatsReporter::ApmStatsReporter()
|
|
|
|
|
: stats_message_queue_(1) {}
|
|
|
|
|
|
|
|
|
|
AudioProcessingImpl::ApmStatsReporter::~ApmStatsReporter() = default;
|
|
|
|
|
|
|
|
|
|
AudioProcessingStats AudioProcessingImpl::ApmStatsReporter::GetStatistics() {
|
2020-07-07 15:53:34 +02:00
|
|
|
MutexLock lock_stats(&mutex_stats_);
|
2019-12-30 14:32:14 +01:00
|
|
|
bool new_stats_available = stats_message_queue_.Remove(&cached_stats_);
|
|
|
|
|
// If the message queue is full, return the cached stats.
|
|
|
|
|
static_cast<void>(new_stats_available);
|
|
|
|
|
|
|
|
|
|
return cached_stats_;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void AudioProcessingImpl::ApmStatsReporter::UpdateStatistics(
|
|
|
|
|
const AudioProcessingStats& new_stats) {
|
|
|
|
|
AudioProcessingStats stats_to_queue = new_stats;
|
|
|
|
|
bool stats_message_passed = stats_message_queue_.Insert(&stats_to_queue);
|
|
|
|
|
// If the message queue is full, discard the new stats.
|
|
|
|
|
static_cast<void>(stats_message_passed);
|
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
} // namespace webrtc
|