2015-10-16 14:35:07 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
|
|
|
|
|
*
|
|
|
|
|
* Use of this source code is governed by a BSD-style license
|
|
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
|
|
|
|
* in the file PATENTS. All contributing project authors may
|
|
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
|
|
|
*/
|
|
|
|
|
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "audio/audio_send_stream.h"
|
2019-07-05 19:08:33 +02:00
|
|
|
|
2019-09-17 17:06:18 +02:00
|
|
|
#include <memory>
|
2015-12-01 11:26:34 +01:00
|
|
|
#include <string>
|
2019-07-26 17:49:52 +02:00
|
|
|
#include <thread>
|
2017-04-27 02:08:52 -07:00
|
|
|
#include <utility>
|
2015-12-01 11:26:34 +01:00
|
|
|
#include <vector>
|
|
|
|
|
|
2019-03-22 12:59:48 +01:00
|
|
|
#include "api/task_queue/default_task_queue_factory.h"
|
2018-10-25 09:52:57 -07:00
|
|
|
#include "api/test/mock_frame_encryptor.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "audio/audio_state.h"
|
|
|
|
|
#include "audio/conversion.h"
|
2018-01-17 11:18:31 +01:00
|
|
|
#include "audio/mock_voe_channel_proxy.h"
|
2018-02-22 14:49:02 +01:00
|
|
|
#include "call/test/mock_rtp_transport_controller_send.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
|
Remove voe::TransmitMixer
TransmitMixer's functionality is moved into the AudioTransportProxy
owned by AudioState. This removes the need for an AudioTransport
implementation in VoEBaseImpl, which means that the proxy is no longer
a proxy, hence AudioTransportProxy is renamed to AudioTransportImpl.
In the short term, AudioState needs to know which AudioDeviceModule is
used, so it is added in AudioState::Config. AudioTransportImpl needs
to know which AudioSendStream:s are currently enabled to send, so
AudioState maintains a map of them, which is reduced into a simple
vector for AudioTransportImpl.
To encode and transmit audio,
AudioSendStream::OnAudioData(std::unique_ptr<AudioFrame> audio_frame)
is introduced, which is used in both the Chromium and standalone use
cases. This removes the need for two different instances of
voe::Channel::ProcessAndEncodeAudio(), so there is now only one,
taking an AudioFrame as argument. Callers need to allocate their own
AudioFrame:s, which is wasteful but not a regression since this was
already happening in the voe::Channel functions.
Most of the logic changed resides in
AudioTransportImpl::RecordedDataIsAvailable(), where two strange
things were found:
1. The clock drift parameter was ineffective since
apm->echo_cancellation()->enable_drift_compensation(false) is
called during initialization.
2. The output parameter 'new_mic_volume' was never set - instead it
was returned as a result, causing the ADM to never update the
analog mic gain
(https://cs.chromium.org/chromium/src/third_party/webrtc/voice_engine/voe_base_impl.cc?q=voe_base_impl.cc&dr&l=100).
Besides this, tests are updated, and some dead code is removed which
was found in the process.
Bug: webrtc:4690, webrtc:8591
Change-Id: I789d5296bf5efb7299a5ee05a4f3ce6abf9124b2
Reviewed-on: https://webrtc-review.googlesource.com/26681
Commit-Queue: Fredrik Solenberg <solenberg@webrtc.org>
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#21301}
2017-12-15 16:42:15 +01:00
|
|
|
#include "modules/audio_device/include/mock_audio_device.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_mixer/audio_mixer_impl.h"
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
#include "modules/audio_mixer/sine_wave_generator.h"
|
2017-11-24 17:29:59 +01:00
|
|
|
#include "modules/audio_processing/include/audio_processing_statistics.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_processing/include/mock_audio_processing.h"
|
2018-02-22 14:49:02 +01:00
|
|
|
#include "modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
|
2019-03-22 12:59:48 +01:00
|
|
|
#include "rtc_base/task_queue_for_test.h"
|
2019-03-04 17:05:12 +01:00
|
|
|
#include "system_wrappers/include/clock.h"
|
2019-02-15 10:54:55 +01:00
|
|
|
#include "test/field_trial.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "test/gtest.h"
|
|
|
|
|
#include "test/mock_audio_encoder.h"
|
|
|
|
|
#include "test/mock_audio_encoder_factory.h"
|
2015-10-16 14:35:07 -07:00
|
|
|
|
|
|
|
|
namespace webrtc {
|
2015-10-27 03:35:21 -07:00
|
|
|
namespace test {
|
2015-11-03 10:15:49 +01:00
|
|
|
namespace {
|
|
|
|
|
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::_;
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
using ::testing::AnyNumber;
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::Eq;
|
|
|
|
|
using ::testing::Field;
|
2020-08-20 16:48:49 +02:00
|
|
|
using ::testing::InSequence;
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::Invoke;
|
|
|
|
|
using ::testing::Ne;
|
2020-12-03 11:30:34 -05:00
|
|
|
using ::testing::NiceMock;
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::Return;
|
|
|
|
|
using ::testing::StrEq;
|
2015-11-16 07:34:50 -08:00
|
|
|
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
static const float kTolerance = 0.0001f;
|
|
|
|
|
|
2015-11-03 10:15:49 +01:00
|
|
|
const uint32_t kSsrc = 1234;
|
2015-11-16 07:34:50 -08:00
|
|
|
const char* kCName = "foo_name";
|
|
|
|
|
const int kAudioLevelId = 2;
|
2015-12-07 10:26:18 +01:00
|
|
|
const int kTransportSequenceNumberId = 4;
|
2017-11-24 17:29:59 +01:00
|
|
|
const int32_t kEchoDelayMedian = 254;
|
|
|
|
|
const int32_t kEchoDelayStdDev = -3;
|
|
|
|
|
const double kDivergentFilterFraction = 0.2f;
|
|
|
|
|
const double kEchoReturnLoss = -65;
|
|
|
|
|
const double kEchoReturnLossEnhancement = 101;
|
|
|
|
|
const double kResidualEchoLikelihood = -1.0f;
|
|
|
|
|
const double kResidualEchoLikelihoodMax = 23.0f;
|
2019-10-09 15:01:33 +02:00
|
|
|
const CallSendStatistics kCallStats = {112, 12, 13456, 17890};
|
2015-11-06 15:34:49 -08:00
|
|
|
const ReportBlock kReportBlock = {456, 780, 123, 567, 890, 132, 143, 13354};
|
2015-12-04 15:22:19 +01:00
|
|
|
const int kTelephoneEventPayloadType = 123;
|
2016-11-17 05:25:37 -08:00
|
|
|
const int kTelephoneEventPayloadFrequency = 65432;
|
2016-03-11 03:06:41 -08:00
|
|
|
const int kTelephoneEventCode = 45;
|
|
|
|
|
const int kTelephoneEventDuration = 6789;
|
2017-04-27 02:08:52 -07:00
|
|
|
constexpr int kIsacPayloadType = 103;
|
|
|
|
|
const SdpAudioFormat kIsacFormat = {"isac", 16000, 1};
|
|
|
|
|
const SdpAudioFormat kOpusFormat = {"opus", 48000, 2};
|
|
|
|
|
const SdpAudioFormat kG722Format = {"g722", 8000, 1};
|
|
|
|
|
const AudioCodecSpec kCodecSpecs[] = {
|
|
|
|
|
{kIsacFormat, {16000, 1, 32000, 10000, 32000}},
|
|
|
|
|
{kOpusFormat, {48000, 1, 32000, 6000, 510000}},
|
|
|
|
|
{kG722Format, {16000, 1, 64000}}};
|
2015-11-06 15:34:49 -08:00
|
|
|
|
2019-05-03 14:40:13 +02:00
|
|
|
// TODO(dklee): This mirrors calculation in audio_send_stream.cc, which
|
|
|
|
|
// should be made more precise in the future. This can be changed when that
|
|
|
|
|
// logic is more accurate.
|
2020-02-17 18:46:07 +01:00
|
|
|
const DataSize kOverheadPerPacket = DataSize::Bytes(20 + 8 + 10 + 12);
|
2020-02-10 11:16:00 +01:00
|
|
|
const TimeDelta kMinFrameLength = TimeDelta::Millis(20);
|
|
|
|
|
const TimeDelta kMaxFrameLength = TimeDelta::Millis(120);
|
2019-10-02 12:27:06 +02:00
|
|
|
const DataRate kMinOverheadRate = kOverheadPerPacket / kMaxFrameLength;
|
|
|
|
|
const DataRate kMaxOverheadRate = kOverheadPerPacket / kMinFrameLength;
|
2019-05-03 14:40:13 +02:00
|
|
|
|
2016-07-26 04:44:06 -07:00
|
|
|
class MockLimitObserver : public BitrateAllocator::LimitObserver {
|
|
|
|
|
public:
|
2020-05-15 11:40:44 +02:00
|
|
|
MOCK_METHOD(void,
|
|
|
|
|
OnAllocationLimitsChanged,
|
|
|
|
|
(BitrateAllocationLimits),
|
|
|
|
|
(override));
|
2016-07-26 04:44:06 -07:00
|
|
|
};
|
|
|
|
|
|
2017-04-27 02:08:52 -07:00
|
|
|
std::unique_ptr<MockAudioEncoder> SetupAudioEncoderMock(
|
|
|
|
|
int payload_type,
|
|
|
|
|
const SdpAudioFormat& format) {
|
|
|
|
|
for (const auto& spec : kCodecSpecs) {
|
|
|
|
|
if (format == spec.format) {
|
2018-02-22 11:09:56 +01:00
|
|
|
std::unique_ptr<MockAudioEncoder> encoder(
|
2019-04-09 15:11:12 +02:00
|
|
|
new ::testing::NiceMock<MockAudioEncoder>());
|
2017-04-27 02:08:52 -07:00
|
|
|
ON_CALL(*encoder.get(), SampleRateHz())
|
|
|
|
|
.WillByDefault(Return(spec.info.sample_rate_hz));
|
|
|
|
|
ON_CALL(*encoder.get(), NumChannels())
|
|
|
|
|
.WillByDefault(Return(spec.info.num_channels));
|
|
|
|
|
ON_CALL(*encoder.get(), RtpTimestampRateHz())
|
|
|
|
|
.WillByDefault(Return(spec.format.clockrate_hz));
|
2019-10-02 12:27:06 +02:00
|
|
|
ON_CALL(*encoder.get(), GetFrameLengthRange())
|
|
|
|
|
.WillByDefault(Return(absl::optional<std::pair<TimeDelta, TimeDelta>>{
|
2020-02-10 11:16:00 +01:00
|
|
|
{TimeDelta::Millis(20), TimeDelta::Millis(120)}}));
|
2017-04-27 02:08:52 -07:00
|
|
|
return encoder;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return nullptr;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
|
|
|
|
|
rtc::scoped_refptr<MockAudioEncoderFactory> factory =
|
2021-04-22 19:21:43 +02:00
|
|
|
rtc::make_ref_counted<MockAudioEncoderFactory>();
|
2017-04-27 02:08:52 -07:00
|
|
|
ON_CALL(*factory.get(), GetSupportedEncoders())
|
|
|
|
|
.WillByDefault(Return(std::vector<AudioCodecSpec>(
|
|
|
|
|
std::begin(kCodecSpecs), std::end(kCodecSpecs))));
|
|
|
|
|
ON_CALL(*factory.get(), QueryAudioEncoder(_))
|
2017-11-16 10:57:35 +01:00
|
|
|
.WillByDefault(Invoke(
|
2018-06-15 12:28:07 +02:00
|
|
|
[](const SdpAudioFormat& format) -> absl::optional<AudioCodecInfo> {
|
2017-11-16 10:57:35 +01:00
|
|
|
for (const auto& spec : kCodecSpecs) {
|
|
|
|
|
if (format == spec.format) {
|
|
|
|
|
return spec.info;
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-06-15 12:28:07 +02:00
|
|
|
return absl::nullopt;
|
2017-11-16 10:57:35 +01:00
|
|
|
}));
|
2018-02-27 13:37:31 +01:00
|
|
|
ON_CALL(*factory.get(), MakeAudioEncoderMock(_, _, _, _))
|
2017-04-27 02:08:52 -07:00
|
|
|
.WillByDefault(Invoke([](int payload_type, const SdpAudioFormat& format,
|
2018-06-15 12:28:07 +02:00
|
|
|
absl::optional<AudioCodecPairId> codec_pair_id,
|
2017-04-27 02:08:52 -07:00
|
|
|
std::unique_ptr<AudioEncoder>* return_value) {
|
|
|
|
|
*return_value = SetupAudioEncoderMock(payload_type, format);
|
|
|
|
|
}));
|
|
|
|
|
return factory;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-06 15:34:49 -08:00
|
|
|
struct ConfigHelper {
|
2020-04-26 23:56:17 +02:00
|
|
|
ConfigHelper(bool audio_bwe_enabled,
|
|
|
|
|
bool expect_set_encoder_call,
|
|
|
|
|
bool use_null_audio_processing)
|
2019-03-04 17:05:12 +01:00
|
|
|
: clock_(1000000),
|
2019-03-22 12:59:48 +01:00
|
|
|
task_queue_factory_(CreateDefaultTaskQueueFactory()),
|
2019-11-26 09:19:40 -08:00
|
|
|
stream_config_(/*send_transport=*/nullptr),
|
2020-04-26 23:56:17 +02:00
|
|
|
audio_processing_(
|
|
|
|
|
use_null_audio_processing
|
|
|
|
|
? nullptr
|
2021-04-22 19:21:43 +02:00
|
|
|
: rtc::make_ref_counted<NiceMock<MockAudioProcessing>>()),
|
2019-09-19 14:54:43 +02:00
|
|
|
bitrate_allocator_(&limit_observer_),
|
2019-03-22 12:59:48 +01:00
|
|
|
worker_queue_(task_queue_factory_->CreateTaskQueue(
|
|
|
|
|
"ConfigHelper_worker_queue",
|
|
|
|
|
TaskQueueFactory::Priority::NORMAL)),
|
2017-07-26 14:18:40 +02:00
|
|
|
audio_encoder_(nullptr) {
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::Invoke;
|
2015-11-16 07:34:50 -08:00
|
|
|
|
2015-11-06 15:34:49 -08:00
|
|
|
AudioState::Config config;
|
2016-11-17 06:48:48 -08:00
|
|
|
config.audio_mixer = AudioMixerImpl::Create();
|
2017-06-29 08:32:09 -07:00
|
|
|
config.audio_processing = audio_processing_;
|
2021-04-22 19:21:43 +02:00
|
|
|
config.audio_device_module = rtc::make_ref_counted<MockAudioDeviceModule>();
|
2015-11-06 15:34:49 -08:00
|
|
|
audio_state_ = AudioState::Create(config);
|
2015-11-16 07:34:50 -08:00
|
|
|
|
2018-11-19 10:27:07 +01:00
|
|
|
SetupDefaultChannelSend(audio_bwe_enabled);
|
2017-04-27 02:08:52 -07:00
|
|
|
SetupMockForSetupSendCodec(expect_set_encoder_call);
|
2020-03-06 09:49:29 +01:00
|
|
|
SetupMockForCallEncoder();
|
2016-10-31 04:08:32 -07:00
|
|
|
|
2021-07-26 11:47:07 +02:00
|
|
|
// Use ISAC as default codec so as to prevent unnecessary `channel_proxy_`
|
2017-04-27 02:08:52 -07:00
|
|
|
// calls from the default ctor behavior.
|
|
|
|
|
stream_config_.send_codec_spec =
|
2017-11-16 10:57:35 +01:00
|
|
|
AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
|
2015-11-06 15:34:49 -08:00
|
|
|
stream_config_.rtp.ssrc = kSsrc;
|
2015-11-16 07:34:50 -08:00
|
|
|
stream_config_.rtp.c_name = kCName;
|
|
|
|
|
stream_config_.rtp.extensions.push_back(
|
2016-05-26 11:24:55 -07:00
|
|
|
RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
|
2017-02-07 07:14:08 -08:00
|
|
|
if (audio_bwe_enabled) {
|
2017-06-30 01:38:56 -07:00
|
|
|
AddBweToConfig(&stream_config_);
|
2017-02-07 07:14:08 -08:00
|
|
|
}
|
2017-04-27 02:08:52 -07:00
|
|
|
stream_config_.encoder_factory = SetupEncoderFactoryMock();
|
2016-11-30 04:47:39 -08:00
|
|
|
stream_config_.min_bitrate_bps = 10000;
|
|
|
|
|
stream_config_.max_bitrate_bps = 65000;
|
2015-11-06 15:34:49 -08:00
|
|
|
}
|
|
|
|
|
|
2018-01-11 13:52:30 +01:00
|
|
|
std::unique_ptr<internal::AudioSendStream> CreateAudioSendStream() {
|
2019-03-07 09:17:19 +01:00
|
|
|
EXPECT_CALL(rtp_transport_, GetWorkerQueue())
|
|
|
|
|
.WillRepeatedly(Return(&worker_queue_));
|
2018-01-11 13:52:30 +01:00
|
|
|
return std::unique_ptr<internal::AudioSendStream>(
|
|
|
|
|
new internal::AudioSendStream(
|
2019-03-04 17:43:34 +01:00
|
|
|
Clock::GetRealTimeClock(), stream_config_, audio_state_,
|
2019-03-22 12:59:48 +01:00
|
|
|
task_queue_factory_.get(), &rtp_transport_, &bitrate_allocator_,
|
2020-04-27 12:01:11 +02:00
|
|
|
&event_log_, absl::nullopt,
|
2018-11-19 10:27:07 +01:00
|
|
|
std::unique_ptr<voe::ChannelSendInterface>(channel_send_)));
|
2018-01-11 13:52:30 +01:00
|
|
|
}
|
|
|
|
|
|
2015-11-06 15:34:49 -08:00
|
|
|
AudioSendStream::Config& config() { return stream_config_; }
|
2017-04-27 02:08:52 -07:00
|
|
|
MockAudioEncoderFactory& mock_encoder_factory() {
|
|
|
|
|
return *static_cast<MockAudioEncoderFactory*>(
|
|
|
|
|
stream_config_.encoder_factory.get());
|
|
|
|
|
}
|
2020-06-03 22:55:33 +02:00
|
|
|
MockRtpRtcpInterface* rtp_rtcp() { return &rtp_rtcp_; }
|
2018-11-19 10:27:07 +01:00
|
|
|
MockChannelSend* channel_send() { return channel_send_; }
|
2018-02-20 09:06:11 +01:00
|
|
|
RtpTransportControllerSendInterface* transport() { return &rtp_transport_; }
|
2016-10-20 03:27:12 -07:00
|
|
|
|
2017-06-30 01:38:56 -07:00
|
|
|
static void AddBweToConfig(AudioSendStream::Config* config) {
|
|
|
|
|
config->rtp.extensions.push_back(RtpExtension(
|
|
|
|
|
RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
|
|
|
|
|
config->send_codec_spec->transport_cc_enabled = true;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-19 10:27:07 +01:00
|
|
|
void SetupDefaultChannelSend(bool audio_bwe_enabled) {
|
|
|
|
|
EXPECT_TRUE(channel_send_ == nullptr);
|
2019-04-09 15:11:12 +02:00
|
|
|
channel_send_ = new ::testing::StrictMock<MockChannelSend>();
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, GetRtpRtcp()).WillRepeatedly(Invoke([this]() {
|
2018-08-08 10:49:16 +02:00
|
|
|
return &this->rtp_rtcp_;
|
|
|
|
|
}));
|
2019-08-21 13:36:20 +02:00
|
|
|
EXPECT_CALL(rtp_rtcp_, SSRC).WillRepeatedly(Return(kSsrc));
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
|
|
|
|
|
EXPECT_CALL(*channel_send_, SetFrameEncryptor(_)).Times(1);
|
2020-03-31 11:29:56 +02:00
|
|
|
EXPECT_CALL(*channel_send_, SetEncoderToPacketizerFrameTransformer(_))
|
|
|
|
|
.Times(1);
|
2020-01-14 17:55:19 +01:00
|
|
|
EXPECT_CALL(rtp_rtcp_, SetExtmapAllowMixed(false)).Times(1);
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_,
|
2016-10-31 04:08:32 -07:00
|
|
|
SetSendAudioLevelIndicationStatus(true, kAudioLevelId))
|
|
|
|
|
.Times(1);
|
2018-02-22 14:49:02 +01:00
|
|
|
EXPECT_CALL(rtp_transport_, GetBandwidthObserver())
|
|
|
|
|
.WillRepeatedly(Return(&bandwidth_observer_));
|
2017-02-07 07:14:08 -08:00
|
|
|
if (audio_bwe_enabled) {
|
2020-01-14 17:55:19 +01:00
|
|
|
EXPECT_CALL(rtp_rtcp_,
|
|
|
|
|
RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
|
|
|
|
|
kTransportSequenceNumberId))
|
2017-02-07 07:14:08 -08:00
|
|
|
.Times(1);
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_,
|
2018-02-22 14:49:02 +01:00
|
|
|
RegisterSenderCongestionControlObjects(
|
|
|
|
|
&rtp_transport_, Eq(&bandwidth_observer_)))
|
2017-02-07 07:14:08 -08:00
|
|
|
.Times(1);
|
|
|
|
|
} else {
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, RegisterSenderCongestionControlObjects(
|
|
|
|
|
&rtp_transport_, Eq(nullptr)))
|
2017-02-07 07:14:08 -08:00
|
|
|
.Times(1);
|
|
|
|
|
}
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, ResetSenderCongestionControlObjects()).Times(1);
|
2020-01-14 17:55:19 +01:00
|
|
|
EXPECT_CALL(rtp_rtcp_, SetRid(std::string())).Times(1);
|
2016-10-31 04:08:32 -07:00
|
|
|
}
|
|
|
|
|
|
2017-04-27 02:08:52 -07:00
|
|
|
void SetupMockForSetupSendCodec(bool expect_set_encoder_call) {
|
|
|
|
|
if (expect_set_encoder_call) {
|
2020-05-15 11:40:44 +02:00
|
|
|
EXPECT_CALL(*channel_send_, SetEncoder)
|
|
|
|
|
.WillOnce(
|
|
|
|
|
[this](int payload_type, std::unique_ptr<AudioEncoder> encoder) {
|
|
|
|
|
this->audio_encoder_ = std::move(encoder);
|
2017-07-26 14:18:40 +02:00
|
|
|
return true;
|
2020-05-15 11:40:44 +02:00
|
|
|
});
|
2017-04-27 02:08:52 -07:00
|
|
|
}
|
2016-10-20 03:27:12 -07:00
|
|
|
}
|
2017-04-27 02:08:52 -07:00
|
|
|
|
2019-02-13 15:11:42 +01:00
|
|
|
void SetupMockForCallEncoder() {
|
2017-07-26 14:18:40 +02:00
|
|
|
// Let ModifyEncoder to invoke mock audio encoder.
|
2019-02-13 15:11:42 +01:00
|
|
|
EXPECT_CALL(*channel_send_, CallEncoder(_))
|
2019-01-16 14:49:44 +01:00
|
|
|
.WillRepeatedly(
|
2019-02-13 15:11:42 +01:00
|
|
|
[this](rtc::FunctionView<void(AudioEncoder*)> modifier) {
|
2017-07-26 14:18:40 +02:00
|
|
|
if (this->audio_encoder_)
|
2019-02-13 15:11:42 +01:00
|
|
|
modifier(this->audio_encoder_.get());
|
2019-01-16 14:49:44 +01:00
|
|
|
});
|
2017-07-26 14:18:40 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-04 15:22:19 +01:00
|
|
|
void SetupMockForSendTelephoneEvent() {
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_TRUE(channel_send_);
|
|
|
|
|
EXPECT_CALL(*channel_send_, SetSendTelephoneEventPayloadType(
|
|
|
|
|
kTelephoneEventPayloadType,
|
2019-03-05 14:29:42 +01:00
|
|
|
kTelephoneEventPayloadFrequency));
|
2015-12-04 15:22:19 +01:00
|
|
|
EXPECT_CALL(
|
2018-11-19 10:27:07 +01:00
|
|
|
*channel_send_,
|
2015-12-04 15:22:19 +01:00
|
|
|
SendTelephoneEventOutband(kTelephoneEventCode, kTelephoneEventDuration))
|
|
|
|
|
.WillOnce(Return(true));
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-26 23:56:17 +02:00
|
|
|
void SetupMockForGetStats(bool use_null_audio_processing) {
|
2019-04-09 15:11:12 +02:00
|
|
|
using ::testing::DoAll;
|
|
|
|
|
using ::testing::SetArgPointee;
|
|
|
|
|
using ::testing::SetArgReferee;
|
2015-11-16 07:34:50 -08:00
|
|
|
|
2015-11-06 15:34:49 -08:00
|
|
|
std::vector<ReportBlock> report_blocks;
|
|
|
|
|
webrtc::ReportBlock block = kReportBlock;
|
|
|
|
|
report_blocks.push_back(block); // Has wrong SSRC.
|
|
|
|
|
block.source_SSRC = kSsrc;
|
|
|
|
|
report_blocks.push_back(block); // Correct block.
|
|
|
|
|
block.fraction_lost = 0;
|
|
|
|
|
report_blocks.push_back(block); // Duplicate SSRC, bad fraction_lost.
|
|
|
|
|
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_TRUE(channel_send_);
|
|
|
|
|
EXPECT_CALL(*channel_send_, GetRTCPStatistics())
|
2015-11-27 10:46:42 -08:00
|
|
|
.WillRepeatedly(Return(kCallStats));
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, GetRemoteRTCPReportBlocks())
|
2015-11-27 10:46:42 -08:00
|
|
|
.WillRepeatedly(Return(report_blocks));
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, GetANAStatistics())
|
2017-09-08 08:13:19 -07:00
|
|
|
.WillRepeatedly(Return(ANAStats()));
|
2018-11-19 10:27:07 +01:00
|
|
|
EXPECT_CALL(*channel_send_, GetBitrate()).WillRepeatedly(Return(0));
|
2017-03-01 17:02:23 -08:00
|
|
|
|
2017-11-24 17:29:59 +01:00
|
|
|
audio_processing_stats_.echo_return_loss = kEchoReturnLoss;
|
|
|
|
|
audio_processing_stats_.echo_return_loss_enhancement =
|
|
|
|
|
kEchoReturnLossEnhancement;
|
|
|
|
|
audio_processing_stats_.delay_median_ms = kEchoDelayMedian;
|
|
|
|
|
audio_processing_stats_.delay_standard_deviation_ms = kEchoDelayStdDev;
|
|
|
|
|
audio_processing_stats_.divergent_filter_fraction =
|
|
|
|
|
kDivergentFilterFraction;
|
|
|
|
|
audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
|
|
|
|
|
audio_processing_stats_.residual_echo_likelihood_recent_max =
|
|
|
|
|
kResidualEchoLikelihoodMax;
|
2020-04-26 23:56:17 +02:00
|
|
|
if (!use_null_audio_processing) {
|
|
|
|
|
ASSERT_TRUE(audio_processing_);
|
|
|
|
|
EXPECT_CALL(*audio_processing_, GetStatistics(true))
|
|
|
|
|
.WillRepeatedly(Return(audio_processing_stats_));
|
|
|
|
|
}
|
2015-11-06 15:34:49 -08:00
|
|
|
}
|
2020-04-26 23:56:17 +02:00
|
|
|
|
2019-10-02 12:27:06 +02:00
|
|
|
TaskQueueForTest* worker() { return &worker_queue_; }
|
2015-11-06 15:34:49 -08:00
|
|
|
|
|
|
|
|
private:
|
2019-03-04 17:05:12 +01:00
|
|
|
SimulatedClock clock_;
|
2019-03-22 12:59:48 +01:00
|
|
|
std::unique_ptr<TaskQueueFactory> task_queue_factory_;
|
2015-11-06 15:34:49 -08:00
|
|
|
rtc::scoped_refptr<AudioState> audio_state_;
|
|
|
|
|
AudioSendStream::Config stream_config_;
|
2019-04-09 15:11:12 +02:00
|
|
|
::testing::StrictMock<MockChannelSend>* channel_send_ = nullptr;
|
2017-06-29 08:32:09 -07:00
|
|
|
rtc::scoped_refptr<MockAudioProcessing> audio_processing_;
|
2017-11-24 17:29:59 +01:00
|
|
|
AudioProcessingStats audio_processing_stats_;
|
2019-04-09 15:11:12 +02:00
|
|
|
::testing::StrictMock<MockRtcpBandwidthObserver> bandwidth_observer_;
|
|
|
|
|
::testing::NiceMock<MockRtcEventLog> event_log_;
|
|
|
|
|
::testing::NiceMock<MockRtpTransportControllerSend> rtp_transport_;
|
2020-06-03 22:55:33 +02:00
|
|
|
::testing::NiceMock<MockRtpRtcpInterface> rtp_rtcp_;
|
2019-04-09 15:11:12 +02:00
|
|
|
::testing::NiceMock<MockLimitObserver> limit_observer_;
|
2016-07-26 04:44:06 -07:00
|
|
|
BitrateAllocator bitrate_allocator_;
|
2021-07-26 11:47:07 +02:00
|
|
|
// `worker_queue` is defined last to ensure all pending tasks are cancelled
|
2016-09-01 01:17:40 -07:00
|
|
|
// and deleted before any other members.
|
2019-03-22 12:59:48 +01:00
|
|
|
TaskQueueForTest worker_queue_;
|
2017-07-26 14:18:40 +02:00
|
|
|
std::unique_ptr<AudioEncoder> audio_encoder_;
|
2015-11-06 15:34:49 -08:00
|
|
|
};
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
|
|
|
|
|
// The audio level ranges linearly [0,32767].
|
|
|
|
|
std::unique_ptr<AudioFrame> CreateAudioFrame1kHzSineWave(int16_t audio_level,
|
|
|
|
|
int duration_ms,
|
|
|
|
|
int sample_rate_hz,
|
|
|
|
|
size_t num_channels) {
|
|
|
|
|
size_t samples_per_channel = sample_rate_hz / (1000 / duration_ms);
|
|
|
|
|
std::vector<int16_t> audio_data(samples_per_channel * num_channels, 0);
|
2019-09-17 17:06:18 +02:00
|
|
|
std::unique_ptr<AudioFrame> audio_frame = std::make_unique<AudioFrame>();
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
audio_frame->UpdateFrame(0 /* RTP timestamp */, &audio_data[0],
|
|
|
|
|
samples_per_channel, sample_rate_hz,
|
|
|
|
|
AudioFrame::SpeechType::kNormalSpeech,
|
|
|
|
|
AudioFrame::VADActivity::kVadUnknown, num_channels);
|
|
|
|
|
SineWaveGenerator wave_generator(1000.0, audio_level);
|
|
|
|
|
wave_generator.GenerateNextFrame(audio_frame.get());
|
|
|
|
|
return audio_frame;
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-03 10:15:49 +01:00
|
|
|
} // namespace
|
2015-10-16 14:35:07 -07:00
|
|
|
|
|
|
|
|
// Checks that every configured field shows up in Config::ToString() exactly
// as documented; the expected string below is the full serialized form.
TEST(AudioSendStreamTest, ConfigToString) {
  AudioSendStream::Config config(/*send_transport=*/nullptr);
  // RTP parameters.
  config.rtp.ssrc = kSsrc;
  config.rtp.c_name = kCName;
  config.rtp.extmap_allow_mixed = true;
  config.rtp.extensions.push_back(
      RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
  // Bitrate bounds and QoS.
  config.min_bitrate_bps = 12000;
  config.max_bitrate_bps = 34000;
  config.has_dscp = true;
  config.rtcp_report_interval_ms = 2500;
  // Codec configuration.
  config.send_codec_spec =
      AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
  config.send_codec_spec->nack_enabled = true;
  config.send_codec_spec->transport_cc_enabled = false;
  config.send_codec_spec->cng_payload_type = 42;
  config.send_codec_spec->red_payload_type = 43;
  config.encoder_factory = MockAudioEncoderFactory::CreateUnusedFactory();
  EXPECT_EQ(
      "{rtp: {ssrc: 1234, extmap-allow-mixed: true, extensions: [{uri: "
      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], "
      "c_name: foo_name}, rtcp_report_interval_ms: 2500, "
      "send_transport: null, "
      "min_bitrate_bps: 12000, max_bitrate_bps: 34000, has "
      "audio_network_adaptor_config: false, has_dscp: true, "
      "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
      "cng_payload_type: 42, red_payload_type: 43, payload_type: 103, "
      "format: {name: isac, clockrate_hz: 16000, num_channels: 1, "
      "parameters: {}}}}",
      config.ToString());
}
|
|
|
|
|
|
|
|
|
|
// Smoke test: an AudioSendStream can be created and torn down, both with a
// real (mocked) APM and with a null audio-processing module.
TEST(AudioSendStreamTest, ConstructDestruct) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
  }
}
|
2015-10-27 03:35:21 -07:00
|
|
|
|
2015-12-04 15:22:19 +01:00
|
|
|
// Verifies that DTMF events are forwarded to the channel and reported as
// successfully sent.
TEST(AudioSendStreamTest, SendTelephoneEvent) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    helper.SetupMockForSendTelephoneEvent();
    EXPECT_TRUE(stream->SendTelephoneEvent(
        kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
        kTelephoneEventCode, kTelephoneEventDuration));
  }
}
|
|
|
|
|
|
2016-06-16 10:53:22 -07:00
|
|
|
// Muting the stream must be delegated to the channel's input mute.
TEST(AudioSendStreamTest, SetMuted) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
    stream->SetMuted(true);
  }
}
|
|
|
|
|
|
2017-02-07 07:14:08 -08:00
|
|
|
// With audio BWE enabled, ConfigHelper's strict mocks verify that the right
// objects are wired onto the channel proxy during construction.
TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
  }
}
|
|
|
|
|
|
|
|
|
|
// Counterpart of the test above: with audio BWE disabled, the strict mocks
// verify that no BWE-related wiring happens on the channel proxy.
TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
  }
}
|
|
|
|
|
|
2015-10-27 03:35:21 -07:00
|
|
|
// Verifies that GetStats() aggregates call stats, RTCP report-block data,
// codec info and (when an APM is present) audio-processing statistics.
TEST(AudioSendStreamTest, GetStats) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    helper.SetupMockForGetStats(use_null_audio_processing);
    AudioSendStream::Stats stats = stream->GetStats(true);

    // Values sourced from kCallStats / kReportBlock.
    EXPECT_EQ(kSsrc, stats.local_ssrc);
    EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
    EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
              stats.header_and_padding_bytes_sent);
    EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
    EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
    EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
    EXPECT_EQ(kIsacFormat.name, stats.codec_name);
    // Jitter is reported in RTP clock ticks; convert to milliseconds.
    EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
                                   (kIsacFormat.clockrate_hz / 1000)),
              stats.jitter_ms);
    EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);

    // No audio has been sent yet, so level/energy/duration are all zero.
    EXPECT_EQ(0, stats.audio_level);
    EXPECT_EQ(0, stats.total_input_energy);
    EXPECT_EQ(0, stats.total_input_duration);

    if (!use_null_audio_processing) {
      // APM statistics are only populated when an APM is attached.
      EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
      EXPECT_EQ(kEchoDelayStdDev,
                stats.apm_statistics.delay_standard_deviation_ms);
      EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
      EXPECT_EQ(kEchoReturnLossEnhancement,
                stats.apm_statistics.echo_return_loss_enhancement);
      EXPECT_EQ(kDivergentFilterFraction,
                stats.apm_statistics.divergent_filter_fraction);
      EXPECT_EQ(kResidualEchoLikelihood,
                stats.apm_statistics.residual_echo_likelihood);
      EXPECT_EQ(kResidualEchoLikelihoodMax,
                stats.apm_statistics.residual_echo_likelihood_recent_max);
      EXPECT_FALSE(stats.typing_noise_detected);
    }
  }
}
|
2016-10-20 03:27:12 -07:00
|
|
|
|
[getStats] Implement "media-source" audio levels, fixing Chrome bug.
Implements RTCAudioSourceStats members:
- audioLevel
- totalAudioEnergy
- totalSamplesDuration
In this CL description these are collectively referred to as the audio
levels.
The audio levels are removed from sending "track" stats (in Chrome,
these are now reported as undefined instead of 0).
Background:
For sending tracks, audio levels were always reported as 0 in Chrome
(https://crbug.com/736403), while audio levels were correctly reported
for receiving tracks. This problem affected the standard getStats() but
not the legacy getStats(), blocking some people from migrating. This
was likely not a problem in native third_party/webrtc code because the
delivery of audio frames from device to send-stream uses a different
code path outside of chromium.
A recent PR (https://github.com/w3c/webrtc-stats/pull/451) moved the
send-side audio levels to the RTCAudioSourceStats, while keeping the
receive-side audio levels on the "track" stats. This allows an
implementation to report the audio levels even if samples are not sent
onto the network (such as if an ICE connection has not been established
yet), reflecting some of the current implementation.
Changes:
1. Audio levels are added to RTCAudioSourceStats. Send-side audio
"track" stats are left undefined. Receive-side audio "track" stats
are not changed in this CL and continue to work.
2. Audio level computation is moved from the AudioState and
AudioTransportImpl to the AudioSendStream. This is because a) the
AudioTransportImpl::RecordedDataIsAvailable() code path is not
exercised in chromium, and b) audio levels should, per-spec, not be
calculated on a per-call basis, for which the AudioState is defined.
3. The audio level computation is now performed in
AudioSendStream::SendAudioData(), a code path used by both native
and chromium code.
4. Comments are added to document behavior of existing code, such as
AudioLevel and AudioSendStream::SendAudioData().
Note:
In this CL, just like before this CL, audio level is only calculated
after an AudioSendStream has been created. This means that before an
O/A negotiation, audio levels are unavailable.
According to spec, if we have an audio source, we should have audio
levels. An immediate solution to this would have been to calculate the
audio level at pc/rtp_sender.cc. The problem is that the
LocalAudioSinkAdapter::OnData() code path, while exercised in chromium,
is not exercised in native code. The issue of calculating audio levels
on a per-source bases rather than on a per-send stream basis is left to
https://crbug.com/webrtc/10771, an existing "media-source" bug.
This CL can be verified manually in Chrome at:
https://codepen.io/anon/pen/vqRGyq
Bug: chromium:736403, webrtc:10771
Change-Id: I8036cd9984f3b187c3177470a8c0d6670a201a5a
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/143789
Reviewed-by: Oskar Sundbom <ossu@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Henrik Boström <hbos@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28480}
2019-07-03 17:11:10 +02:00
|
|
|
// Exercises the audio-level/energy/duration accounting that
// AudioSendStream::SendAudioData() performs on every submitted frame.
TEST(AudioSendStreamTest, GetStatsAudioLevel) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    helper.SetupMockForGetStats(use_null_audio_processing);
    // Encoding itself is not under test here.
    EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudio)
        .Times(AnyNumber());

    constexpr int kSampleRateHz = 48000;
    constexpr size_t kNumChannels = 1;
    constexpr int16_t kSilentAudioLevel = 0;
    constexpr int16_t kMaxAudioLevel = 32767;  // Audio level is [0,32767].
    constexpr int kAudioFrameDurationMs = 10;

    // Process 10 audio frames (100 ms) of silence. After this, on the next
    // (11-th) frame, the audio level will be updated with the maximum audio
    // level of the first 11 frames. See AudioLevel.
    for (size_t i = 0; i < 10; ++i) {
      stream->SendAudioData(
          CreateAudioFrame1kHzSineWave(kSilentAudioLevel, kAudioFrameDurationMs,
                                       kSampleRateHz, kNumChannels));
    }
    AudioSendStream::Stats stats = stream->GetStats();
    EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
    EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
    EXPECT_NEAR(0.1f, stats.total_input_duration,
                kTolerance);  // 100 ms = 0.1 s

    // Process 10 audio frames (100 ms) of maximum audio level.
    // Note that AudioLevel updates the audio level every 11th frame, processing
    // 10 frames above was needed to see a non-zero audio level here.
    for (size_t i = 0; i < 10; ++i) {
      stream->SendAudioData(CreateAudioFrame1kHzSineWave(
          kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
    }
    stats = stream->GetStats();
    EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
    // Energy increases by energy*duration, where energy is audio level in
    // [0,1].
    EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance);  // 0.1 s of max
    EXPECT_NEAR(0.2f, stats.total_input_duration,
                kTolerance);  // 200 ms = 0.2 s
  }
}
|
|
|
|
|
|
2017-07-26 14:18:40 +02:00
|
|
|
// The ANA config string must be applied to the encoder on construction, and
// a new config string must be re-applied on Reconfigure().
TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    helper.config().send_codec_spec =
        AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
    const std::string kAnaConfigString = "abcde";
    const std::string kAnaReconfigString = "12345";
    helper.config().audio_network_adaptor_config = kAnaConfigString;

    // Expect the encoder to receive both ANA config strings, once each.
    EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
        .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
                             int payload_type, const SdpAudioFormat& format,
                             absl::optional<AudioCodecPairId> codec_pair_id,
                             std::unique_ptr<AudioEncoder>* return_value) {
          auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
          EXPECT_CALL(*mock_encoder,
                      EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
              .WillOnce(Return(true));
          EXPECT_CALL(*mock_encoder,
                      EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
              .WillOnce(Return(true));
          *return_value = std::move(mock_encoder);
        }));

    auto stream = helper.CreateAudioSendStream();

    // Reconfigure with a new ANA config string.
    auto stream_config = helper.config();
    stream_config.audio_network_adaptor_config = kAnaReconfigString;
    stream->Reconfigure(stream_config);
  }
}
|
|
|
|
|
|
2020-08-20 16:48:49 +02:00
|
|
|
// Verifies the ordering contract between overhead notifications and ANA
// enabling: the encoder gets overhead, then ANA is enabled, then overhead is
// delivered again after ANA is active.
TEST(AudioSendStreamTest, AudioNetworkAdaptorReceivesOverhead) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    helper.config().send_codec_spec =
        AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
    const std::string kAnaConfigString = "abcde";

    EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
        .WillOnce(Invoke(
            [&kAnaConfigString](int payload_type, const SdpAudioFormat& format,
                                absl::optional<AudioCodecPairId> codec_pair_id,
                                std::unique_ptr<AudioEncoder>* return_value) {
              auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
              // The expectations below must be satisfied in this exact order.
              InSequence s;
              EXPECT_CALL(
                  *mock_encoder,
                  OnReceivedOverhead(Eq(kOverheadPerPacket.bytes<size_t>())))
                  .Times(2);
              EXPECT_CALL(*mock_encoder,
                          EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
                  .WillOnce(Return(true));
              // Note: Overhead is received AFTER ANA has been enabled.
              EXPECT_CALL(
                  *mock_encoder,
                  OnReceivedOverhead(Eq(kOverheadPerPacket.bytes<size_t>())))
                  .WillOnce(Return());
              *return_value = std::move(mock_encoder);
            }));
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(kOverheadPerPacket.bytes<size_t>()));

    auto stream = helper.CreateAudioSendStream();

    // Enabling ANA via Reconfigure() triggers the sequenced calls above.
    auto stream_config = helper.config();
    stream_config.audio_network_adaptor_config = kAnaConfigString;
    stream->Reconfigure(stream_config);
  }
}
|
|
|
|
|
|
2016-10-20 03:27:12 -07:00
|
|
|
// VAD is applied when codec is mono and the CNG frequency matches the codec
|
2017-04-27 02:08:52 -07:00
|
|
|
// clock rate.
|
2016-10-20 03:27:12 -07:00
|
|
|
// VAD is applied when the codec is mono and the CNG frequency matches the
// codec clock rate; capture the encoder handed to the channel and verify it
// wraps the real codec (i.e. it is a comfort-noise wrapper).
TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, false, use_null_audio_processing);
    helper.config().send_codec_spec =
        AudioSendStream::Config::SendCodecSpec(9, kG722Format);
    helper.config().send_codec_spec->cng_payload_type = 105;
    // Steal the encoder passed to the channel so it can be inspected.
    std::unique_ptr<AudioEncoder> stolen_encoder;
    EXPECT_CALL(*helper.channel_send(), SetEncoder)
        .WillOnce([&stolen_encoder](int payload_type,
                                    std::unique_ptr<AudioEncoder> encoder) {
          stolen_encoder = std::move(encoder);
          return true;
        });
    EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));

    auto stream = helper.CreateAudioSendStream();

    // We cannot truly determine if the encoder created is an AudioEncoderCng.
    // It is the only reasonable implementation that will return something from
    // ReclaimContainedEncoders, though.
    ASSERT_TRUE(stolen_encoder);
    EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
  }
}
|
|
|
|
|
|
2016-11-30 04:47:39 -08:00
|
|
|
// A BWE target above the configured maximum must be clamped to
// max_bitrate_bps before being forwarded to the channel.
TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(
            Field(&BitrateAllocationUpdate::target_bitrate,
                  Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps)))));
    // Target 5 kbps above the maximum.
    BitrateAllocationUpdate update;
    update.target_bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
    update.packet_loss_ratio = 0;
    update.round_trip_time = TimeDelta::Millis(50);
    update.bwe_period = TimeDelta::Millis(6000);
    // OnBitrateUpdated() must run on the worker thread.
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
2019-05-03 14:40:13 +02:00
|
|
|
// A BWE target inside the allowed range must be passed through unchanged.
TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(Field(
            &BitrateAllocationUpdate::target_bitrate,
            Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
    BitrateAllocationUpdate update;
    update.target_bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
|
|
|
|
// A BWE target below the field-trial minimum must be raised to that minimum.
TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) {
  ScopedFieldTrials field_trials(
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                  Eq(DataRate::KilobitsPerSec(6)))));
    // 1 kbps is below the 6 kbps floor from the field trial.
    BitrateAllocationUpdate update;
    update.target_bitrate = DataRate::KilobitsPerSec(1);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
|
|
|
|
// A BWE target above the field-trial maximum must be capped to that maximum.
TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) {
  ScopedFieldTrials field_trials(
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                  Eq(DataRate::KilobitsPerSec(64)))));
    // 128 kbps exceeds the 64 kbps ceiling from the field trial.
    BitrateAllocationUpdate update;
    update.target_bitrate = DataRate::KilobitsPerSec(128);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
|
|
|
|
// With legacy overhead disabled, the per-packet overhead rate is added on top
// of the configured maximum when forwarding the bitrate allocation.
TEST(AudioSendStreamTest, SSBweWithOverhead) {
  ScopedFieldTrials field_trials(
      "WebRTC-Audio-LegacyOverhead/Disabled/");
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(kOverheadPerPacket.bytes<size_t>()));
    auto stream = helper.CreateAudioSendStream();
    const DataRate bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
        kMaxOverheadRate;
    EXPECT_CALL(*helper.channel_send(),
                OnBitrateAllocation(Field(
                    &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
    BitrateAllocationUpdate update;
    update.target_bitrate = bitrate;
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
|
|
|
|
// With overhead accounting enabled, a target below the field-trial minimum
// is raised to min (6 kbps) plus the minimum overhead rate.
TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
  ScopedFieldTrials field_trials(
      "WebRTC-Audio-LegacyOverhead/Disabled/"
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(kOverheadPerPacket.bytes<size_t>()));
    auto stream = helper.CreateAudioSendStream();
    // Floor = configured minimum plus overhead at the minimum packet rate.
    const DataRate floor_rate =
        DataRate::KilobitsPerSec(6) + kMinOverheadRate;
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                  Eq(floor_rate))));
    BitrateAllocationUpdate allocation;
    // Deliberately below the 6 kbps minimum.
    allocation.target_bitrate = DataRate::KilobitsPerSec(1);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(allocation); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
|
|
|
|
// With overhead accounting enabled, a target above the field-trial maximum
// is clamped to max (64 kbps) plus the maximum overhead rate.
TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
  ScopedFieldTrials field_trials(
      "WebRTC-Audio-LegacyOverhead/Disabled/"
      "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(true, true, use_null_audio_processing);
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(kOverheadPerPacket.bytes<size_t>()));
    auto stream = helper.CreateAudioSendStream();
    // Cap = configured maximum plus overhead at the maximum packet rate.
    const DataRate cap_rate =
        DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
    EXPECT_CALL(
        *helper.channel_send(),
        OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
                                  Eq(cap_rate))));
    BitrateAllocationUpdate allocation;
    // Deliberately above the 64 kbps maximum.
    allocation.target_bitrate = DataRate::KilobitsPerSec(128);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(allocation); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
2016-11-30 04:47:39 -08:00
|
|
|
// The BWE probing period carried in a bitrate update must be forwarded
// unchanged to the channel's allocation callback.
TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();

    EXPECT_CALL(*helper.channel_send(),
                OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
                                          Eq(TimeDelta::Millis(5000)))));
    BitrateAllocationUpdate bwe_update;
    bwe_update.target_bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
    bwe_update.packet_loss_ratio = 0;
    bwe_update.round_trip_time = TimeDelta::Millis(50);
    bwe_update.bwe_period = TimeDelta::Millis(5000);
    helper.worker()->SendTask([&] { stream->OnBitrateUpdated(bwe_update); },
                              RTC_FROM_HERE);
  }
}
|
|
|
|
|
|
2017-04-27 02:08:52 -07:00
|
|
|
// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
|
|
|
|
|
TEST(AudioSendStreamTest, DontRecreateEncoder) {
|
2020-04-26 23:56:17 +02:00
|
|
|
for (bool use_null_audio_processing : {false, true}) {
|
|
|
|
|
ConfigHelper helper(false, false, use_null_audio_processing);
|
|
|
|
|
// WillOnce is (currently) the default used by ConfigHelper if asked to set
|
|
|
|
|
// an expectation for SetEncoder. Since this behavior is essential for this
|
|
|
|
|
// test to be correct, it's instead set-up manually here. Otherwise a simple
|
|
|
|
|
// change to ConfigHelper (say to WillRepeatedly) would silently make this
|
|
|
|
|
// test useless.
|
2020-05-15 11:40:44 +02:00
|
|
|
EXPECT_CALL(*helper.channel_send(), SetEncoder).WillOnce(Return());
|
2020-04-26 23:56:17 +02:00
|
|
|
|
|
|
|
|
EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
|
|
|
|
|
|
|
|
|
|
helper.config().send_codec_spec =
|
|
|
|
|
AudioSendStream::Config::SendCodecSpec(9, kG722Format);
|
|
|
|
|
helper.config().send_codec_spec->cng_payload_type = 105;
|
|
|
|
|
auto send_stream = helper.CreateAudioSendStream();
|
|
|
|
|
send_stream->Reconfigure(helper.config());
|
|
|
|
|
}
|
2017-04-27 02:08:52 -07:00
|
|
|
}
|
|
|
|
|
|
2017-06-30 01:38:56 -07:00
|
|
|
// Adding transport-cc via Reconfigure must first tear down the existing
// congestion-control objects on the channel and only then register new ones,
// in that strict order.
TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto stream = helper.CreateAudioSendStream();
    auto bwe_config = helper.config();
    ConfigHelper::AddBweToConfig(&bwe_config);

    EXPECT_CALL(*helper.rtp_rtcp(),
                RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
                                           kTransportSequenceNumberId))
        .Times(1);
    {
      // Reset must strictly precede re-registration.
      ::testing::InSequence ordered;
      EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
          .Times(1);
      EXPECT_CALL(*helper.channel_send(),
                  RegisterSenderCongestionControlObjects(helper.transport(),
                                                         Ne(nullptr)))
          .Times(1);
    }

    stream->Reconfigure(bwe_config);
  }
}
|
2018-02-01 16:53:16 +01:00
|
|
|
|
2019-02-04 15:16:06 -08:00
|
|
|
// Setting a transport overhead must poke the encoder (CallEncoder) and be
// reflected in the stream's reported per-packet overhead.
TEST(AudioSendStreamTest, OnTransportOverheadChanged) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto send_stream = helper.CreateAudioSendStream();
    // NOTE: the previous `auto new_config = helper.config();` local was
    // unused and has been removed.

    // CallEncoder will be called on overhead change.
    EXPECT_CALL(*helper.channel_send(), CallEncoder);

    const size_t transport_overhead_per_packet_bytes = 333;
    send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);

    // The stream must report exactly the overhead that was set.
    EXPECT_EQ(transport_overhead_per_packet_bytes,
              send_stream->TestOnlyGetPerPacketOverheadBytes());
  }
}
|
|
|
|
|
|
2020-05-13 14:43:11 +02:00
|
|
|
// CallEncoder must fire only when the transport overhead actually changes:
// once for the initial value, not at all for a repeat of the same value, and
// again for a new value.
TEST(AudioSendStreamTest, DoesntCallEncoderWhenOverheadUnchanged) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    auto send_stream = helper.CreateAudioSendStream();
    // NOTE: the previous `auto new_config = helper.config();` local was
    // unused and has been removed.

    // CallEncoder will be called on overhead change.
    EXPECT_CALL(*helper.channel_send(), CallEncoder);
    const size_t transport_overhead_per_packet_bytes = 333;
    send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);

    // Set the same overhead again, CallEncoder should not be called again.
    EXPECT_CALL(*helper.channel_send(), CallEncoder).Times(0);
    send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);

    // New overhead, call CallEncoder again
    EXPECT_CALL(*helper.channel_send(), CallEncoder);
    send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes + 1);
  }
}
|
|
|
|
|
|
2020-05-07 18:18:32 +02:00
|
|
|
// The per-packet overhead reported by the stream must track the value the
// RTP module reports via ExpectedPerPacketOverhead, re-sampled on each
// bitrate update.
TEST(AudioSendStreamTest, AudioOverheadChanged) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    const size_t audio_overhead_per_packet_bytes = 555;
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(audio_overhead_per_packet_bytes));
    auto send_stream = helper.CreateAudioSendStream();
    // NOTE: the previous `auto new_config = helper.config();` local was
    // unused and has been removed.

    BitrateAllocationUpdate update;
    update.target_bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
        kMaxOverheadRate;
    EXPECT_CALL(*helper.channel_send(), OnBitrateAllocation);
    helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);

    EXPECT_EQ(audio_overhead_per_packet_bytes,
              send_stream->TestOnlyGetPerPacketOverheadBytes());

    // Raise the overhead reported by the RTP module; the next bitrate update
    // must pick up the new value.
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(audio_overhead_per_packet_bytes + 20));
    EXPECT_CALL(*helper.channel_send(), OnBitrateAllocation);
    helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);

    EXPECT_EQ(audio_overhead_per_packet_bytes + 20,
              send_stream->TestOnlyGetPerPacketOverheadBytes());
  }
}
|
|
|
|
|
|
|
|
|
|
// When both an audio (RTP) overhead and a transport overhead are present,
// the stream must report their sum as the total per-packet overhead.
TEST(AudioSendStreamTest, OnAudioAndTransportOverheadChanged) {
  for (bool use_null_audio_processing : {false, true}) {
    ConfigHelper helper(false, true, use_null_audio_processing);
    const size_t audio_overhead_per_packet_bytes = 555;
    EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead)
        .WillRepeatedly(Return(audio_overhead_per_packet_bytes));
    auto send_stream = helper.CreateAudioSendStream();
    // NOTE: the previous `auto new_config = helper.config();` local was
    // unused and has been removed.

    const size_t transport_overhead_per_packet_bytes = 333;
    send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);

    BitrateAllocationUpdate update;
    update.target_bitrate =
        DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
        kMaxOverheadRate;
    EXPECT_CALL(*helper.channel_send(), OnBitrateAllocation);
    helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
                              RTC_FROM_HERE);

    // Total overhead is transport + audio.
    EXPECT_EQ(
        transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
        send_stream->TestOnlyGetPerPacketOverheadBytes());
  }
}
|
|
|
|
|
|
2018-10-25 09:52:57 -07:00
|
|
|
// Validates that reconfiguring the AudioSendStream with a Frame encryptor
|
|
|
|
|
// correctly reconfigures on the object without crashing.
|
|
|
|
|
TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) {
|
2020-04-26 23:56:17 +02:00
|
|
|
for (bool use_null_audio_processing : {false, true}) {
|
|
|
|
|
ConfigHelper helper(false, true, use_null_audio_processing);
|
|
|
|
|
auto send_stream = helper.CreateAudioSendStream();
|
|
|
|
|
auto new_config = helper.config();
|
|
|
|
|
|
|
|
|
|
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
|
2021-04-22 19:21:43 +02:00
|
|
|
rtc::make_ref_counted<MockFrameEncryptor>());
|
2020-04-26 23:56:17 +02:00
|
|
|
new_config.frame_encryptor = mock_frame_encryptor_0;
|
|
|
|
|
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
|
|
|
|
|
.Times(1);
|
|
|
|
|
send_stream->Reconfigure(new_config);
|
|
|
|
|
|
|
|
|
|
// Not updating the frame encryptor shouldn't force it to reconfigure.
|
|
|
|
|
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
|
|
|
|
|
send_stream->Reconfigure(new_config);
|
|
|
|
|
|
|
|
|
|
// Updating frame encryptor to a new object should force a call to the
|
|
|
|
|
// proxy.
|
|
|
|
|
rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
|
2021-04-22 19:21:43 +02:00
|
|
|
rtc::make_ref_counted<MockFrameEncryptor>());
|
2020-04-26 23:56:17 +02:00
|
|
|
new_config.frame_encryptor = mock_frame_encryptor_1;
|
|
|
|
|
new_config.crypto_options.sframe.require_frame_encryption = true;
|
|
|
|
|
EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
|
|
|
|
|
.Times(1);
|
|
|
|
|
send_stream->Reconfigure(new_config);
|
|
|
|
|
}
|
2018-10-25 09:52:57 -07:00
|
|
|
}
|
2015-10-27 03:35:21 -07:00
|
|
|
} // namespace test
|
2015-10-16 14:35:07 -07:00
|
|
|
} // namespace webrtc
|