2011-07-07 08:21:25 +00:00
|
|
|
/*
|
2012-05-02 23:56:37 +00:00
|
|
|
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
2011-07-07 08:21:25 +00:00
|
|
|
*
|
|
|
|
|
* Use of this source code is governed by a BSD-style license
|
|
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
|
|
|
|
* in the file PATENTS. All contributing project authors may
|
|
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
|
|
|
|
*/
|
|
|
|
|
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "modules/audio_processing/audio_buffer.h"
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2018-10-23 12:03:01 +02:00
|
|
|
#include <string.h>
|
2019-07-05 19:08:33 +02:00
|
|
|
|
2018-10-23 12:03:01 +02:00
|
|
|
#include <cstdint>
|
|
|
|
|
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "common_audio/channel_buffer.h"
|
|
|
|
|
#include "common_audio/include/audio_util.h"
|
|
|
|
|
#include "common_audio/resampler/push_sinc_resampler.h"
|
2018-10-23 12:03:01 +02:00
|
|
|
#include "modules/audio_processing/splitting_filter.h"
|
2017-09-15 06:47:31 +02:00
|
|
|
#include "rtc_base/checks.h"
|
2011-11-15 16:57:56 +00:00
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
namespace webrtc {
|
|
|
|
|
namespace {
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
const size_t kSamplesPer16kHzChannel = 160;
|
|
|
|
|
const size_t kSamplesPer32kHzChannel = 320;
|
|
|
|
|
const size_t kSamplesPer48kHzChannel = 480;
|
|
|
|
|
|
|
|
|
|
size_t NumBandsFromSamplesPerChannel(size_t num_frames) {
|
|
|
|
|
size_t num_bands = 1;
|
|
|
|
|
if (num_frames == kSamplesPer32kHzChannel ||
|
|
|
|
|
num_frames == kSamplesPer48kHzChannel) {
|
|
|
|
|
num_bands = rtc::CheckedDivExact(num_frames, kSamplesPer16kHzChannel);
|
2015-02-10 22:52:15 +00:00
|
|
|
}
|
2019-08-21 17:52:28 +00:00
|
|
|
return num_bands;
|
2015-02-10 22:52:15 +00:00
|
|
|
}
|
|
|
|
|
|
2014-04-22 21:00:04 +00:00
|
|
|
} // namespace
|
2011-07-07 08:21:25 +00:00
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
// Constructs an AudioBuffer that moves audio through three configurations:
// the input format (|input_num_frames| x |num_input_channels|), the internal
// processing format (|process_num_frames| x |num_process_channels|), and the
// output format (|output_num_frames| frames). Per-channel sinc resamplers are
// created up front for whichever rate conversions the configuration needs,
// and a band-splitting filter is created when the processing rate calls for
// more than one band.
AudioBuffer::AudioBuffer(size_t input_num_frames,
                         size_t num_input_channels,
                         size_t process_num_frames,
                         size_t num_process_channels,
                         size_t output_num_frames)
    : input_num_frames_(input_num_frames),
      num_input_channels_(num_input_channels),
      proc_num_frames_(process_num_frames),
      num_proc_channels_(num_process_channels),
      output_num_frames_(output_num_frames),
      num_channels_(num_process_channels),
      num_bands_(NumBandsFromSamplesPerChannel(proc_num_frames_)),
      num_split_frames_(rtc::CheckedDivExact(proc_num_frames_, num_bands_)),
      data_(new IFChannelBuffer(proc_num_frames_, num_proc_channels_)),
      output_buffer_(new IFChannelBuffer(output_num_frames_, num_channels_)) {
  RTC_DCHECK_GT(input_num_frames_, 0);
  RTC_DCHECK_GT(proc_num_frames_, 0);
  RTC_DCHECK_GT(output_num_frames_, 0);
  RTC_DCHECK_GT(num_input_channels_, 0);
  RTC_DCHECK_GT(num_proc_channels_, 0);
  // Processing may downmix relative to the input but never upmix.
  RTC_DCHECK_LE(num_proc_channels_, num_input_channels_);

  if (input_num_frames_ != proc_num_frames_ ||
      output_num_frames_ != proc_num_frames_) {
    // Create an intermediate buffer for resampling.
    process_buffer_.reset(
        new ChannelBuffer<float>(proc_num_frames_, num_proc_channels_));

    if (input_num_frames_ != proc_num_frames_) {
      // One resampler per processing channel for input -> processing rate.
      for (size_t i = 0; i < num_proc_channels_; ++i) {
        input_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
            new PushSincResampler(input_num_frames_, proc_num_frames_)));
      }
    }

    if (output_num_frames_ != proc_num_frames_) {
      // One resampler per processing channel for processing -> output rate.
      for (size_t i = 0; i < num_proc_channels_; ++i) {
        output_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
            new PushSincResampler(proc_num_frames_, output_num_frames_)));
      }
    }
  }

  if (num_bands_ > 1) {
    // Multi-band operation: allocate the split-band buffer and the
    // analysis/synthesis filter that populates it.
    split_data_.reset(
        new IFChannelBuffer(proc_num_frames_, num_proc_channels_, num_bands_));
    splitting_filter_.reset(
        new SplittingFilter(num_proc_channels_, num_bands_, proc_num_frames_));
  }
}
|
|
|
|
|
|
2014-04-24 18:28:56 +00:00
|
|
|
// Out-of-line default destructor; required here because the header only
// forward-declares the types owned through std::unique_ptr members.
AudioBuffer::~AudioBuffer() = default;
|
|
|
|
|
|
2014-04-22 21:00:04 +00:00
|
|
|
// Copies deinterleaved float audio in |data| into the buffer's processing
// format: optionally downmixing to mono, optionally resampling to the
// processing rate, and finally scaling from the [-1, 1] float range to the
// S16 value range used internally.
void AudioBuffer::CopyFrom(const float* const* data,
                           const StreamConfig& stream_config) {
  RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
  RTC_DCHECK_EQ(stream_config.num_channels(), num_input_channels_);
  InitForNewData();
  // Initialized lazily because there's a different condition in
  // DeinterleaveFrom.
  const bool need_to_downmix =
      num_input_channels_ > 1 && num_proc_channels_ == 1;
  if (need_to_downmix && !input_buffer_) {
    input_buffer_.reset(
        new IFChannelBuffer(input_num_frames_, num_proc_channels_));
  }

  // Downmix.
  // |data_ptr| tracks the most recent stage of the pipeline; it starts at the
  // caller's data and is redirected after each in-place-incapable step.
  const float* const* data_ptr = data;
  if (need_to_downmix) {
    DownmixToMono<float, float>(data, input_num_frames_, num_input_channels_,
                                input_buffer_->fbuf()->channels()[0]);
    data_ptr = input_buffer_->fbuf_const()->channels();
  }

  // Resample.
  if (input_num_frames_ != proc_num_frames_) {
    for (size_t i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_[i]->Resample(data_ptr[i], input_num_frames_,
                                     process_buffer_->channels()[i],
                                     proc_num_frames_);
    }
    data_ptr = process_buffer_->channels();
  }

  // Convert to the S16 range.
  for (size_t i = 0; i < num_proc_channels_; ++i) {
    FloatToFloatS16(data_ptr[i], proc_num_frames_,
                    data_->fbuf()->channels()[i]);
  }
}
|
|
|
|
|
|
2015-07-23 11:41:39 -07:00
|
|
|
// Copies the buffer's processed audio out to |data| in the output format:
// scaling back from the internal S16 range to floats, optionally resampling
// to the output rate, and duplicating the first channel into any extra output
// channels when the buffer is mono but the caller expects more channels.
void AudioBuffer::CopyTo(const StreamConfig& stream_config,
                         float* const* data) {
  RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
  RTC_DCHECK(stream_config.num_channels() == num_channels_ ||
             num_channels_ == 1);

  // Convert to the float range.
  float* const* data_ptr = data;
  if (output_num_frames_ != proc_num_frames_) {
    // Convert to an intermediate buffer for subsequent resampling.
    data_ptr = process_buffer_->channels();
  }
  for (size_t i = 0; i < num_channels_; ++i) {
    FloatS16ToFloat(data_->fbuf()->channels()[i], proc_num_frames_,
                    data_ptr[i]);
  }

  // Resample.
  if (output_num_frames_ != proc_num_frames_) {
    for (size_t i = 0; i < num_channels_; ++i) {
      output_resamplers_[i]->Resample(data_ptr[i], proc_num_frames_, data[i],
                                      output_num_frames_);
    }
  }

  // Upmix.
  for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
    memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
  }
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
void AudioBuffer::InitForNewData() {
|
|
|
|
|
num_channels_ = num_proc_channels_;
|
|
|
|
|
data_->set_num_channels(num_proc_channels_);
|
|
|
|
|
if (split_data_.get()) {
|
|
|
|
|
split_data_->set_num_channels(num_proc_channels_);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Returns read-only float channel pointers for |band|. When no band split
// exists, only the lowest band maps to the full-band data; any other band
// yields nullptr.
const float* const* AudioBuffer::split_channels_const_f(Band band) const {
  if (!split_data_) {
    return band == kBand0To8kHz ? data_->fbuf_const()->channels() : nullptr;
  }
  return split_data_->fbuf_const()->channels(band);
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
const float* const* AudioBuffer::channels_const_f() const {
|
|
|
|
|
return data_->fbuf_const()->channels();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
float* const* AudioBuffer::channels_f() {
|
|
|
|
|
return data_->fbuf()->channels();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const float* const* AudioBuffer::split_bands_const_f(size_t channel) const {
|
|
|
|
|
return split_data_.get() ? split_data_->fbuf_const()->bands(channel)
|
|
|
|
|
: data_->fbuf_const()->bands(channel);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
float* const* AudioBuffer::split_bands_f(size_t channel) {
|
|
|
|
|
return split_data_.get() ? split_data_->fbuf()->bands(channel)
|
|
|
|
|
: data_->fbuf()->bands(channel);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size_t AudioBuffer::num_channels() const {
|
|
|
|
|
return num_channels_;
|
|
|
|
|
}
|
|
|
|
|
|
Convert channel counts to size_t.
IIRC, this was originally requested by ajm during review of the other size_t conversions I did over the past year, and I agreed it made sense, but wanted to do it separately since those changes were already gargantuan.
BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, henrika@webrtc.org, kjellander@webrtc.org, minyue@webrtc.org, perkj@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org, tina.legrand@webrtc.org
Review URL: https://codereview.webrtc.org/1316523002 .
Cr-Commit-Position: refs/heads/master@{#11229}
2016-01-12 16:26:35 -08:00
|
|
|
void AudioBuffer::set_num_channels(size_t num_channels) {
|
2014-12-11 17:09:21 +00:00
|
|
|
num_channels_ = num_channels;
|
2016-06-30 15:33:37 -07:00
|
|
|
data_->set_num_channels(num_channels);
|
|
|
|
|
if (split_data_.get()) {
|
|
|
|
|
split_data_->set_num_channels(num_channels);
|
|
|
|
|
}
|
2011-07-07 08:21:25 +00:00
|
|
|
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
size_t AudioBuffer::num_frames() const {
|
|
|
|
|
return proc_num_frames_;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size_t AudioBuffer::num_frames_per_band() const {
|
|
|
|
|
return num_split_frames_;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size_t AudioBuffer::num_bands() const {
|
|
|
|
|
return num_bands_;
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-20 14:39:39 -07:00
|
|
|
// The resampler is only for supporting 48kHz to 16kHz in the reverse stream.
// Loads interleaved int16 audio from |frame| into the processing format:
// deinterleaving (with simultaneous downmix when processing is mono) and
// resampling to the processing rate when the input rate differs.
void AudioBuffer::DeinterleaveFrom(const AudioFrame* frame) {
  RTC_DCHECK_EQ(frame->num_channels_, num_input_channels_);
  RTC_DCHECK_EQ(frame->samples_per_channel_, input_num_frames_);
  InitForNewData();
  // Initialized lazily because there's a different condition in CopyFrom.
  if ((input_num_frames_ != proc_num_frames_) && !input_buffer_) {
    input_buffer_.reset(
        new IFChannelBuffer(input_num_frames_, num_proc_channels_));
  }

  // Deinterleave directly into the processing buffer when no resampling is
  // needed; otherwise go through the intermediate input buffer.
  int16_t* const* deinterleaved;
  if (input_num_frames_ == proc_num_frames_) {
    deinterleaved = data_->ibuf()->channels();
  } else {
    deinterleaved = input_buffer_->ibuf()->channels();
  }
  // TODO(yujo): handle muted frames more efficiently.
  if (num_proc_channels_ == 1) {
    // Downmix and deinterleave simultaneously.
    DownmixInterleavedToMono(frame->data(), input_num_frames_,
                             num_input_channels_, deinterleaved[0]);
  } else {
    RTC_DCHECK_EQ(num_proc_channels_, num_input_channels_);
    Deinterleave(frame->data(), input_num_frames_, num_proc_channels_,
                 deinterleaved);
  }

  // Resample.
  if (input_num_frames_ != proc_num_frames_) {
    for (size_t i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_[i]->Resample(
          input_buffer_->fbuf_const()->channels()[i], input_num_frames_,
          data_->fbuf()->channels()[i], proc_num_frames_);
    }
  }
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
// Writes the processed audio out to |frame| as interleaved int16 samples:
// resampling to the output rate when needed, then either interleaving
// directly or upmixing a mono buffer into the frame's channel count.
void AudioBuffer::InterleaveTo(AudioFrame* frame) const {
  RTC_DCHECK(frame->num_channels_ == num_channels_ || num_channels_ == 1);
  RTC_DCHECK_EQ(frame->samples_per_channel_, output_num_frames_);

  // Resample if necessary.
  IFChannelBuffer* data_ptr = data_.get();
  if (proc_num_frames_ != output_num_frames_) {
    for (size_t i = 0; i < num_channels_; ++i) {
      output_resamplers_[i]->Resample(
          data_->fbuf()->channels()[i], proc_num_frames_,
          output_buffer_->fbuf()->channels()[i], output_num_frames_);
    }
    data_ptr = output_buffer_.get();
  }

  // TODO(yujo): handle muted frames more efficiently.
  if (frame->num_channels_ == num_channels_) {
    Interleave(data_ptr->ibuf()->channels(), output_num_frames_, num_channels_,
               frame->mutable_data());
  } else {
    // Mono buffer, multi-channel frame: duplicate the single channel.
    UpmixMonoToInterleaved(data_ptr->ibuf()->channels()[0], output_num_frames_,
                           frame->num_channels_, frame->mutable_data());
  }
}
|
|
|
|
|
|
2014-11-14 22:18:10 +00:00
|
|
|
// Runs the analysis filter, splitting the full-band data into the per-band
// buffers in split_data_.
void AudioBuffer::SplitIntoFrequencyBands() {
  splitting_filter_->Analysis(data_.get(), split_data_.get());
}
|
|
|
|
|
|
|
|
|
|
// Runs the synthesis filter, recombining the per-band buffers in split_data_
// back into the full-band data.
void AudioBuffer::MergeFrequencyBands() {
  splitting_filter_->Synthesis(split_data_.get(), data_.get());
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
void AudioBuffer::CopySplitChannelDataTo(size_t channel,
|
2019-08-20 09:19:21 +02:00
|
|
|
int16_t* const* split_band_data) {
|
|
|
|
|
for (size_t k = 0; k < num_bands(); ++k) {
|
2019-08-21 17:52:28 +00:00
|
|
|
const float* band_data = split_bands_f(channel)[k];
|
2019-08-20 09:19:21 +02:00
|
|
|
RTC_DCHECK(split_band_data[k]);
|
|
|
|
|
RTC_DCHECK(band_data);
|
|
|
|
|
for (size_t i = 0; i < num_frames_per_band(); ++i) {
|
|
|
|
|
split_band_data[k][i] = FloatS16ToS16(band_data[i]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-21 17:52:28 +00:00
|
|
|
void AudioBuffer::CopySplitChannelDataFrom(
|
2019-08-20 09:19:21 +02:00
|
|
|
size_t channel,
|
|
|
|
|
const int16_t* const* split_band_data) {
|
|
|
|
|
for (size_t k = 0; k < num_bands(); ++k) {
|
2019-08-21 17:52:28 +00:00
|
|
|
float* band_data = split_bands_f(channel)[k];
|
2019-08-20 09:19:21 +02:00
|
|
|
RTC_DCHECK(split_band_data[k]);
|
|
|
|
|
RTC_DCHECK(band_data);
|
|
|
|
|
for (size_t i = 0; i < num_frames_per_band(); ++i) {
|
|
|
|
|
band_data[i] = split_band_data[k][i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 08:21:25 +00:00
|
|
|
} // namespace webrtc
|