/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
|
|
|
|
|
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_processing/audio_buffer.h"

#include <string.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <memory>

#include "common_audio/channel_buffer.h"
#include "common_audio/include/audio_util.h"
#include "common_audio/resampler/push_sinc_resampler.h"
#include "modules/audio_processing/splitting_filter.h"
#include "rtc_base/checks.h"
|
2011-11-15 16:57:56 +00:00
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
namespace webrtc {
|
|
|
|
|
namespace {

// Per-channel sample counts of a 10 ms frame at the named sample rates.
constexpr size_t kSamplesPer32kHzChannel = 320;
constexpr size_t kSamplesPer48kHzChannel = 480;
constexpr size_t kSamplesPer192kHzChannel = 1920;
// Largest per-channel frame the fixed-size stack buffers must accommodate.
constexpr size_t kMaxSamplesPerChannel = kSamplesPer192kHzChannel;

// Maps a per-channel frame count to the number of frequency bands used by
// the splitting filter: 32 kHz frames are processed in two bands, 48 kHz
// frames in three, and anything else as a single full-band signal.
size_t NumBandsFromFramesPerChannel(size_t num_frames) {
  switch (num_frames) {
    case kSamplesPer32kHzChannel:
      return 2;
    case kSamplesPer48kHzChannel:
      return 3;
    default:
      return 1;
  }
}

}  // namespace
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
// Constructs a buffer that internally operates at `buffer_rate` Hz with
// `buffer_num_channels` channels, accepting input at `input_rate` Hz /
// `input_num_channels` channels and producing output at `output_rate` Hz.
// All per-channel frame counts are derived as rate / 100, i.e. 10 ms chunks;
// CheckedDivExact enforces that each rate is a multiple of 100.
AudioBuffer::AudioBuffer(size_t input_rate,
                         size_t input_num_channels,
                         size_t buffer_rate,
                         size_t buffer_num_channels,
                         size_t output_rate)
    : input_num_frames_(
          rtc::CheckedDivExact(static_cast<int>(input_rate), 100)),
      input_num_channels_(input_num_channels),
      buffer_num_frames_(
          rtc::CheckedDivExact(static_cast<int>(buffer_rate), 100)),
      buffer_num_channels_(buffer_num_channels),
      output_num_frames_(
          rtc::CheckedDivExact(static_cast<int>(output_rate), 100)),
      // The active channel count starts equal to the allocated count; it can
      // later be reduced via set_num_channels() and reset by
      // RestoreNumChannels().
      num_channels_(buffer_num_channels),
      num_bands_(NumBandsFromFramesPerChannel(buffer_num_frames_)),
      num_split_frames_(rtc::CheckedDivExact(buffer_num_frames_, num_bands_)),
      data_(new ChannelBuffer<float>(buffer_num_frames_, buffer_num_channels_)),
      output_buffer_(
          new ChannelBuffer<float>(output_num_frames_, num_channels_)) {
  RTC_DCHECK_GT(input_num_frames_, 0);
  RTC_DCHECK_GT(buffer_num_frames_, 0);
  RTC_DCHECK_GT(output_num_frames_, 0);
  RTC_DCHECK_GT(input_num_channels_, 0);
  RTC_DCHECK_GT(buffer_num_channels_, 0);
  // Downmixing (input channels -> buffer channels) is supported; upmixing on
  // input is not.
  RTC_DCHECK_LE(buffer_num_channels_, input_num_channels_);

  const bool input_resampling_needed = input_num_frames_ != buffer_num_frames_;
  const bool output_resampling_needed =
      output_num_frames_ != buffer_num_frames_;
  // One sinc resampler per channel in each direction, created only when the
  // corresponding rate differs from the internal buffer rate.
  if (input_resampling_needed) {
    for (size_t i = 0; i < buffer_num_channels_; ++i) {
      input_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
          new PushSincResampler(input_num_frames_, buffer_num_frames_)));
    }
  }

  if (output_resampling_needed) {
    for (size_t i = 0; i < buffer_num_channels_; ++i) {
      output_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
          new PushSincResampler(buffer_num_frames_, output_num_frames_)));
    }
  }

  // Band-split storage and the analysis/synthesis filter are only needed for
  // multi-band operation (32 or 48 kHz internal rates).
  if (num_bands_ > 1) {
    split_data_.reset(new ChannelBuffer<float>(
        buffer_num_frames_, buffer_num_channels_, num_bands_));
    splitting_filter_.reset(new SplittingFilter(
        buffer_num_channels_, num_bands_, buffer_num_frames_));
  }
}
|
|
|
|
|
|
2014-04-24 18:28:56 +00:00
|
|
|
// Out-of-line so the unique_ptr members' destructors instantiate here, where
// the owned types are complete.
AudioBuffer::~AudioBuffer() = default;
|
|
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
// Selects mono downmixing that picks out a single input channel rather than
// averaging. `channel` must be a valid input channel index (DCHECKed); in
// non-debug builds it is additionally clamped to the last valid index.
void AudioBuffer::set_downmixing_to_specific_channel(size_t channel) {
  RTC_DCHECK_GT(input_num_channels_, channel);
  downmix_by_averaging_ = false;
  const size_t last_channel = input_num_channels_ - 1;
  channel_for_downmixing_ = std::min(channel, last_channel);
}
|
|
|
|
|
|
|
|
|
|
// Selects mono downmixing that averages across all input channels (the
// alternative to set_downmixing_to_specific_channel()).
void AudioBuffer::set_downmixing_by_averaging() {
  downmix_by_averaging_ = true;
}
|
|
|
|
|
|
2014-04-22 21:00:04 +00:00
|
|
|
void AudioBuffer::CopyFrom(const float* const* data,
|
2015-07-23 11:41:39 -07:00
|
|
|
const StreamConfig& stream_config) {
|
2016-09-14 05:23:22 -07:00
|
|
|
RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
|
2019-08-21 15:02:37 +02:00
|
|
|
RTC_DCHECK_EQ(stream_config.num_channels(), input_num_channels_);
|
|
|
|
|
RestoreNumChannels();
|
|
|
|
|
const bool downmix_needed = input_num_channels_ > 1 && num_channels_ == 1;
|
|
|
|
|
|
|
|
|
|
const bool resampling_needed = input_num_frames_ != buffer_num_frames_;
|
|
|
|
|
|
|
|
|
|
if (downmix_needed) {
|
|
|
|
|
RTC_DCHECK_GT(kMaxSamplesPerChannel, input_num_frames_);
|
|
|
|
|
|
|
|
|
|
std::array<float, kMaxSamplesPerChannel> downmix;
|
|
|
|
|
if (downmix_by_averaging_) {
|
|
|
|
|
const float kOneByNumChannels = 1.f / input_num_channels_;
|
|
|
|
|
for (size_t i = 0; i < input_num_frames_; ++i) {
|
|
|
|
|
float value = data[0][i];
|
|
|
|
|
for (size_t j = 1; j < input_num_channels_; ++j) {
|
|
|
|
|
value += data[j][i];
|
|
|
|
|
}
|
|
|
|
|
downmix[i] = value * kOneByNumChannels;
|
|
|
|
|
}
|
2014-04-22 21:00:04 +00:00
|
|
|
}
|
2019-08-21 15:02:37 +02:00
|
|
|
const float* downmixed_data =
|
|
|
|
|
downmix_by_averaging_ ? downmix.data() : data[channel_for_downmixing_];
|
2014-04-22 21:00:04 +00:00
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
if (resampling_needed) {
|
|
|
|
|
input_resamplers_[0]->Resample(downmixed_data, input_num_frames_,
|
|
|
|
|
data_->channels()[0], buffer_num_frames_);
|
|
|
|
|
}
|
|
|
|
|
const float* data_to_convert =
|
|
|
|
|
resampling_needed ? data_->channels()[0] : downmixed_data;
|
|
|
|
|
FloatToFloatS16(data_to_convert, buffer_num_frames_, data_->channels()[0]);
|
|
|
|
|
} else {
|
|
|
|
|
if (resampling_needed) {
|
|
|
|
|
for (size_t i = 0; i < num_channels_; ++i) {
|
|
|
|
|
input_resamplers_[i]->Resample(data[i], input_num_frames_,
|
|
|
|
|
data_->channels()[i],
|
|
|
|
|
buffer_num_frames_);
|
|
|
|
|
FloatToFloatS16(data_->channels()[i], buffer_num_frames_,
|
|
|
|
|
data_->channels()[i]);
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (size_t i = 0; i < num_channels_; ++i) {
|
|
|
|
|
FloatToFloatS16(data[i], buffer_num_frames_, data_->channels()[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
2014-04-22 21:00:04 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
void AudioBuffer::CopyTo(const StreamConfig& stream_config,
|
2014-04-22 21:00:04 +00:00
|
|
|
float* const* data) {
|
2016-09-14 05:23:22 -07:00
|
|
|
RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
|
2014-04-22 21:00:04 +00:00
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
const bool resampling_needed = output_num_frames_ != buffer_num_frames_;
|
|
|
|
|
if (resampling_needed) {
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
for (size_t i = 0; i < num_channels_; ++i) {
|
2019-08-21 15:02:37 +02:00
|
|
|
FloatS16ToFloat(data_->channels()[i], buffer_num_frames_,
|
|
|
|
|
data_->channels()[i]);
|
|
|
|
|
output_resamplers_[i]->Resample(data_->channels()[i], buffer_num_frames_,
|
|
|
|
|
data[i], output_num_frames_);
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (size_t i = 0; i < num_channels_; ++i) {
|
|
|
|
|
FloatS16ToFloat(data_->channels()[i], buffer_num_frames_, data[i]);
|
2014-04-22 21:00:04 +00:00
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
2016-01-11 20:32:29 -08:00
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
|
2016-01-11 20:32:29 -08:00
|
|
|
memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
|
|
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
void AudioBuffer::RestoreNumChannels() {
|
|
|
|
|
num_channels_ = buffer_num_channels_;
|
|
|
|
|
data_->set_num_channels(buffer_num_channels_);
|
2015-02-10 22:52:15 +00:00
|
|
|
if (split_data_.get()) {
|
2019-08-21 15:02:37 +02:00
|
|
|
split_data_->set_num_channels(buffer_num_channels_);
|
2014-12-03 01:06:35 +00:00
|
|
|
}
|
2014-09-25 20:52:08 +00:00
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
void AudioBuffer::set_num_channels(size_t num_channels) {
|
2019-08-21 15:02:37 +02:00
|
|
|
RTC_DCHECK_GE(buffer_num_channels_, num_channels);
|
2014-12-11 17:09:21 +00:00
|
|
|
num_channels_ = num_channels;
|
2016-06-30 15:33:37 -07:00
|
|
|
data_->set_num_channels(num_channels);
|
|
|
|
|
if (split_data_.get()) {
|
|
|
|
|
split_data_->set_num_channels(num_channels);
|
|
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2015-05-20 14:39:39 -07:00
|
|
|
// The resampler is only for supporting 48kHz to 16kHz in the reverse stream.
// Copies interleaved int16 audio from `frame` into the internal buffer,
// deinterleaving, downmixing to mono and/or resampling as required. Samples
// are stored in the internal FloatS16 range (int16 values used directly).
void AudioBuffer::CopyFrom(const AudioFrame* frame) {
  RTC_DCHECK_EQ(frame->num_channels_, input_num_channels_);
  RTC_DCHECK_EQ(frame->samples_per_channel_, input_num_frames_);
  // Undo any previous set_num_channels() reduction before ingesting new data.
  RestoreNumChannels();

  const bool resampling_required = input_num_frames_ != buffer_num_frames_;

  const int16_t* interleaved = frame->data();
  if (num_channels_ == 1) {
    if (input_num_channels_ == 1) {
      // Mono in, mono out: no deinterleaving or downmixing needed.
      if (resampling_required) {
        std::array<float, kMaxSamplesPerChannel> float_buffer;
        S16ToFloatS16(interleaved, input_num_frames_, float_buffer.data());
        input_resamplers_[0]->Resample(float_buffer.data(), input_num_frames_,
                                       data_->channels()[0],
                                       buffer_num_frames_);
      } else {
        S16ToFloatS16(interleaved, input_num_frames_, data_->channels()[0]);
      }
    } else {
      // Multi-channel input downmixed to a single internal channel. Downmix
      // into a temporary only when a resampling pass will follow; otherwise
      // write straight into the internal buffer.
      std::array<float, kMaxSamplesPerChannel> float_buffer;
      float* downmixed_data =
          resampling_required ? float_buffer.data() : data_->channels()[0];
      if (downmix_by_averaging_) {
        // Average the interleaved channels; `k` walks the interleaved stream.
        for (size_t j = 0, k = 0; j < input_num_frames_; ++j) {
          int32_t sum = 0;
          for (size_t i = 0; i < input_num_channels_; ++i, ++k) {
            sum += interleaved[k];
          }
          downmixed_data[j] = sum / static_cast<int16_t>(input_num_channels_);
        }
      } else {
        // Pick out the configured channel from the interleaved stream.
        for (size_t j = 0, k = channel_for_downmixing_; j < input_num_frames_;
             ++j, k += input_num_channels_) {
          downmixed_data[j] = interleaved[k];
        }
      }

      if (resampling_required) {
        input_resamplers_[0]->Resample(downmixed_data, input_num_frames_,
                                       data_->channels()[0],
                                       buffer_num_frames_);
      }
    }
  } else {
    // Multi-channel path: extract channel `channel` from the interleaved
    // int16 stream `x` into the float array `y`.
    auto deinterleave_channel = [](size_t channel, size_t num_channels,
                                   size_t samples_per_channel, const int16_t* x,
                                   float* y) {
      for (size_t j = 0, k = channel; j < samples_per_channel;
           ++j, k += num_channels) {
        y[j] = x[k];
      }
    };

    if (resampling_required) {
      // Reuse a single scratch buffer per channel: deinterleave, then
      // resample into the internal buffer.
      std::array<float, kMaxSamplesPerChannel> float_buffer;
      for (size_t i = 0; i < num_channels_; ++i) {
        deinterleave_channel(i, num_channels_, input_num_frames_, interleaved,
                             float_buffer.data());
        input_resamplers_[i]->Resample(float_buffer.data(), input_num_frames_,
                                       data_->channels()[i],
                                       buffer_num_frames_);
      }
    } else {
      for (size_t i = 0; i < num_channels_; ++i) {
        deinterleave_channel(i, num_channels_, input_num_frames_, interleaved,
                             data_->channels()[i]);
      }
    }
  }
}
|
|
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
// Exports the internal buffer into `frame` as interleaved int16 samples,
// resampling to the output rate if required and saturating via FloatS16ToS16.
// The frame must either match the active channel count or the buffer must be
// mono, in which case the mono signal is duplicated across all frame channels.
void AudioBuffer::CopyTo(AudioFrame* frame) const {
  RTC_DCHECK(frame->num_channels_ == num_channels_ || num_channels_ == 1);
  RTC_DCHECK_EQ(frame->samples_per_channel_, output_num_frames_);

  const bool resampling_required = buffer_num_frames_ != output_num_frames_;

  int16_t* interleaved = frame->mutable_data();
  if (num_channels_ == 1) {
    std::array<float, kMaxSamplesPerChannel> float_buffer;

    if (resampling_required) {
      output_resamplers_[0]->Resample(data_->channels()[0], buffer_num_frames_,
                                      float_buffer.data(), output_num_frames_);
    }
    const float* deinterleaved =
        resampling_required ? float_buffer.data() : data_->channels()[0];

    if (frame->num_channels_ == 1) {
      // Mono to mono: straight conversion.
      for (size_t j = 0; j < output_num_frames_; ++j) {
        interleaved[j] = FloatS16ToS16(deinterleaved[j]);
      }
    } else {
      // Mono upmix: write each converted sample into every frame channel.
      for (size_t i = 0, k = 0; i < output_num_frames_; ++i) {
        float tmp = FloatS16ToS16(deinterleaved[i]);
        for (size_t j = 0; j < frame->num_channels_; ++j, ++k) {
          interleaved[k] = tmp;
        }
      }
    }
  } else {
    // Multi-channel path: write the float array `x` into channel `channel`
    // of the interleaved int16 stream `y`, converting each sample.
    auto interleave_channel = [](size_t channel, size_t num_channels,
                                 size_t samples_per_channel, const float* x,
                                 int16_t* y) {
      for (size_t k = 0, j = channel; k < samples_per_channel;
           ++k, j += num_channels) {
        y[j] = FloatS16ToS16(x[k]);
      }
    };

    if (resampling_required) {
      for (size_t i = 0; i < num_channels_; ++i) {
        std::array<float, kMaxSamplesPerChannel> float_buffer;
        output_resamplers_[i]->Resample(data_->channels()[i],
                                        buffer_num_frames_, float_buffer.data(),
                                        output_num_frames_);
        interleave_channel(i, frame->num_channels_, output_num_frames_,
                           float_buffer.data(), interleaved);
      }
    } else {
      for (size_t i = 0; i < num_channels_; ++i) {
        interleave_channel(i, frame->num_channels_, output_num_frames_,
                           data_->channels()[i], interleaved);
      }
    }

    // Fill any frame channels beyond the active count by copying from within
    // the interleaved stream. NOTE(review): given the DCHECK at the top, this
    // branch has frame->num_channels_ == num_channels_, so this loop appears
    // unreachable; also `n` starts at num_channels_ (the first *unwritten*
    // channel slot), which looks like it should be num_channels_ - 1 if the
    // intent is duplicating the last written channel — confirm upstream.
    for (size_t i = num_channels_; i < frame->num_channels_; ++i) {
      for (size_t j = 0, k = i, n = num_channels_; j < output_num_frames_;
           ++j, k += frame->num_channels_, n += frame->num_channels_) {
        interleaved[k] = interleaved[n];
      }
    }
  }
}
|
|
|
|
|
|
2014-11-14 22:18:10 +00:00
|
|
|
// Runs the analysis filter bank, splitting the full-band data_ into the
// per-band representation in split_data_. Only meaningful when the buffer was
// constructed with num_bands_ > 1 (i.e. splitting_filter_ was allocated).
void AudioBuffer::SplitIntoFrequencyBands() {
  splitting_filter_->Analysis(data_.get(), split_data_.get());
}
|
|
|
|
|
|
|
|
|
|
// Runs the synthesis filter bank, recombining the per-band data in
// split_data_ back into the full-band data_. Counterpart of
// SplitIntoFrequencyBands(); requires splitting_filter_ to be allocated.
void AudioBuffer::MergeFrequencyBands() {
  splitting_filter_->Synthesis(split_data_.get(), data_.get());
}
|
|
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
void AudioBuffer::ExportSplitChannelData(size_t channel,
|
2019-08-20 09:19:21 +02:00
|
|
|
int16_t* const* split_band_data) {
|
|
|
|
|
for (size_t k = 0; k < num_bands(); ++k) {
|
2019-08-21 15:02:37 +02:00
|
|
|
const float* band_data = split_bands(channel)[k];
|
|
|
|
|
|
2019-08-20 09:19:21 +02:00
|
|
|
RTC_DCHECK(split_band_data[k]);
|
|
|
|
|
RTC_DCHECK(band_data);
|
|
|
|
|
for (size_t i = 0; i < num_frames_per_band(); ++i) {
|
|
|
|
|
split_band_data[k][i] = FloatS16ToS16(band_data[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-21 15:02:37 +02:00
|
|
|
void AudioBuffer::ImportSplitChannelData(
|
2019-08-20 09:19:21 +02:00
|
|
|
size_t channel,
|
|
|
|
|
const int16_t* const* split_band_data) {
|
|
|
|
|
for (size_t k = 0; k < num_bands(); ++k) {
|
2019-08-21 15:02:37 +02:00
|
|
|
float* band_data = split_bands(channel)[k];
|
2019-08-20 09:19:21 +02:00
|
|
|
RTC_DCHECK(split_band_data[k]);
|
|
|
|
|
RTC_DCHECK(band_data);
|
|
|
|
|
for (size_t i = 0; i < num_frames_per_band(); ++i) {
|
|
|
|
|
band_data[i] = split_band_data[k][i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
} // namespace webrtc
|