/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#if !defined(__has_feature) || !__has_feature(objc_arc)
#error "This file requires ARC support."
#endif

#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>

#include "webrtc/modules/audio_device/ios/audio_device_ios.h"

#include "webrtc/base/atomicops.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_device/fine_audio_buffer.h"
#include "webrtc/modules/utility/include/helpers_ios.h"

#import "WebRTC/RTCLogging.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"

namespace webrtc {

#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"

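// Convenience macros for checking OSStatus results from Core Audio calls.
// LOG_AND_RETURN_IF_ERROR logs the failing status and returns false from the
// enclosing function; LOG_IF_ERROR only logs the failure and continues.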
#define LOG_AND_RETURN_IF_ERROR(error, message)  \
  do {                                           \
    OSStatus err = error;                        \
    if (err) {                                   \
      LOG(LS_ERROR) << message << ": " << err;   \
      return false;                              \
    }                                            \
  } while (0)

#define LOG_IF_ERROR(error, message)             \
  do {                                           \
    OSStatus err = error;                        \
    if (err) {                                   \
      LOG(LS_ERROR) << message << ": " << err;   \
    }                                            \
  } while (0)

// Hardcoded delay estimates based on real measurements.
// TODO(henrika): these values are not used in combination with built-in AEC.
// Can most likely be removed.
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;

using ios::CheckAndLogError;

#if !defined(NDEBUG)
// Helper method that logs essential device information strings.
static void LogDeviceInfo() {
  LOG(LS_INFO) << "LogDeviceInfo";
  @autoreleasepool {
    LOG(LS_INFO) << " system name: " << ios::GetSystemName();
    LOG(LS_INFO) << " system version 1(2): "
                 << ios::GetSystemVersionAsString();
    LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion();
    LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
    LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
    LOG(LS_INFO) << " process name: " << ios::GetProcessName();
    LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
    LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
    LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
#if defined(__IPHONE_9_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
    LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
#endif
  }
}
#endif  // !defined(NDEBUG)

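// The object is constructed on the thread that will own it. That thread is
// captured in |thread_| so that audio session notifications, which may arrive
// on arbitrary threads, can be marshalled back to it by the On*() callbacks.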
AudioDeviceIOS::AudioDeviceIOS()
    : async_invoker_(new rtc::AsyncInvoker()),
      audio_device_buffer_(nullptr),
      audio_unit_(nullptr),
      recording_(0),
      playing_(0),
      initialized_(false),
      rec_is_initialized_(false),
      play_is_initialized_(false),
      is_interrupted_(false) {
  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
  thread_ = rtc::Thread::Current();
  audio_session_observer_ =
      [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
}

AudioDeviceIOS::~AudioDeviceIOS() {
  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
  audio_session_observer_ = nil;
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  Terminate();
}

void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  LOGI() << "AttachAudioBuffer";
  RTC_DCHECK(audioBuffer);
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  audio_device_buffer_ = audioBuffer;
}

int32_t AudioDeviceIOS::Init() {
  LOGI() << "Init";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (initialized_) {
    return 0;
  }
#if !defined(NDEBUG)
  LogDeviceInfo();
#endif
  // Store the preferred sample rate and preferred number of channels already
  // here. They have not been set and confirmed yet since configureForWebRTC
  // is not called until audio is about to start. However, it makes sense to
  // store the parameters now and then verify at a later stage.
  RTCAudioSessionConfiguration* config =
      [RTCAudioSessionConfiguration webRTCConfiguration];
  playout_parameters_.reset(config.sampleRate,
                            config.outputNumberOfChannels);
  record_parameters_.reset(config.sampleRate,
                           config.inputNumberOfChannels);
  // Ensure that the audio device buffer (ADB) knows about the internal audio
  // parameters. Note that, even if we are unable to get a mono audio session,
  // we will always tell the I/O audio unit to do a channel format conversion
  // to guarantee mono on the "input side" of the audio unit.
  UpdateAudioDeviceBuffer();
  initialized_ = true;
  return 0;
}

int32_t AudioDeviceIOS::Terminate() {
  LOGI() << "Terminate";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!initialized_) {
    return 0;
  }
  StopPlayout();
  StopRecording();
  initialized_ = false;
  return 0;
}

int32_t AudioDeviceIOS::InitPlayout() {
  LOGI() << "InitPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!play_is_initialized_);
  RTC_DCHECK(!playing_);
  if (!rec_is_initialized_) {
    if (!InitPlayOrRecord()) {
      LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
      return -1;
    }
  }
  play_is_initialized_ = true;
  return 0;
}

int32_t AudioDeviceIOS::InitRecording() {
  LOGI() << "InitRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!rec_is_initialized_);
  RTC_DCHECK(!recording_);
  if (!play_is_initialized_) {
    if (!InitPlayOrRecord()) {
      LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
      return -1;
    }
  }
  rec_is_initialized_ = true;
  return 0;
}

int32_t AudioDeviceIOS::StartPlayout() {
  LOGI() << "StartPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(play_is_initialized_);
  RTC_DCHECK(!playing_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
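  // Recording and playout share one voice-processing I/O audio unit; only
  // start it here if the recording side has not already done so.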
  if (!recording_ &&
      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartPlayout failed to start audio unit.");
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&playing_, 1);
  return 0;
}

int32_t AudioDeviceIOS::StopPlayout() {
  LOGI() << "StopPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!play_is_initialized_ || !playing_) {
    return 0;
  }
  if (!recording_) {
    ShutdownPlayOrRecord();
  }
  play_is_initialized_ = false;
  rtc::AtomicOps::ReleaseStore(&playing_, 0);
  return 0;
}

int32_t AudioDeviceIOS::StartRecording() {
  LOGI() << "StartRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(rec_is_initialized_);
  RTC_DCHECK(!recording_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
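  // As in StartPlayout(), the shared audio unit is only started here if the
  // playout side has not already started it.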
  if (!playing_ &&
      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartRecording failed to start audio unit.");
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&recording_, 1);
  return 0;
}

int32_t AudioDeviceIOS::StopRecording() {
  LOGI() << "StopRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!rec_is_initialized_ || !recording_) {
    return 0;
  }
  if (!playing_) {
    ShutdownPlayOrRecord();
  }
  rec_is_initialized_ = false;
  rtc::AtomicOps::ReleaseStore(&recording_, 0);
  return 0;
}

// Change the default receiver playout route to speaker.
int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
  LOGI() << "SetLoudspeakerStatus(" << enable << ")";

  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session lockForConfiguration];
  NSString* category = session.category;
  AVAudioSessionCategoryOptions options = session.categoryOptions;
  // Respect old category options if category is
  // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
  // might not be valid for this category.
  if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
    if (enable) {
      options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
    } else {
      options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
    }
  } else {
    options = AVAudioSessionCategoryOptionDefaultToSpeaker;
  }
  NSError* error = nil;
  BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
                          withOptions:options
                                error:&error];
  ios::CheckAndLogError(success, error);
  [session unlockForConfiguration];
  return (error == nil) ? 0 : -1;
}

int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
  LOGI() << "GetLoudspeakerStatus";
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  AVAudioSessionCategoryOptions options = session.categoryOptions;
  enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
  return 0;
}

int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
  delayMS = kFixedPlayoutDelayEstimate;
  return 0;
}

int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
  delayMS = kFixedRecordDelayEstimate;
  return 0;
}

int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
  LOGI() << "GetPlayoutAudioParameters";
  RTC_DCHECK(playout_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  *params = playout_parameters_;
  return 0;
}

int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
  LOGI() << "GetRecordAudioParameters";
  RTC_DCHECK(record_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  *params = record_parameters_;
  return 0;
}

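// The On*() callbacks below are invoked by the RTCAudioSessionDelegateAdapter
// registered in the constructor. They can fire on any thread, so the actual
// work is posted to |thread_| via |async_invoker_| and carried out in the
// corresponding Handle*() methods.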
void AudioDeviceIOS::OnInterruptionBegin() {
  RTC_DCHECK(async_invoker_);
  RTC_DCHECK(thread_);
  if (thread_->IsCurrent()) {
    HandleInterruptionBegin();
    return;
  }
  async_invoker_->AsyncInvoke<void>(
      thread_,
      rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionBegin, this));
}

void AudioDeviceIOS::OnInterruptionEnd() {
  RTC_DCHECK(async_invoker_);
  RTC_DCHECK(thread_);
  if (thread_->IsCurrent()) {
    HandleInterruptionEnd();
    return;
  }
  async_invoker_->AsyncInvoke<void>(
      thread_,
      rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionEnd, this));
}

void AudioDeviceIOS::OnValidRouteChange() {
  RTC_DCHECK(async_invoker_);
  RTC_DCHECK(thread_);
  if (thread_->IsCurrent()) {
    HandleValidRouteChange();
    return;
  }
  async_invoker_->AsyncInvoke<void>(
      thread_,
      rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this));
}

void AudioDeviceIOS::OnConfiguredForWebRTC() {
  RTC_DCHECK(async_invoker_);
  RTC_DCHECK(thread_);
  if (thread_->IsCurrent()) {
    HandleConfiguredForWebRTC();
    return;
  }
  async_invoker_->AsyncInvoke<void>(
      thread_,
      rtc::Bind(&webrtc::AudioDeviceIOS::HandleConfiguredForWebRTC, this));
}

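// Render callback for the input bus of the voice-processing I/O unit. It runs
// on Core Audio's real-time I/O thread, so no locks are taken here and
// |recording_| is read with an atomic acquire-load.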
OSStatus AudioDeviceIOS::OnDeliverRecordedData(
    AudioUnitRenderActionFlags* flags,
    const AudioTimeStamp* time_stamp,
    UInt32 bus_number,
    UInt32 num_frames,
    AudioBufferList* /* io_data */) {
  OSStatus result = noErr;
  // Simply return if recording is not enabled.
  if (!rtc::AtomicOps::AcquireLoad(&recording_))
    return result;

  size_t frames_per_buffer = record_parameters_.frames_per_buffer();
  if (num_frames != frames_per_buffer) {
    // We have seen short bursts (1-2 frames) where |in_number_frames| changes.
    // Add a log to keep track of longer sequences if that should ever happen.
    // Also return since calling AudioUnitRender in this state will only result
    // in kAudio_ParamError (-50) anyhow.
    RTCLogWarning(@"Expected %u frames but got %u",
                  static_cast<unsigned int>(frames_per_buffer),
                  static_cast<unsigned int>(num_frames));
    return result;
  }

  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the |io_data| parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // We can make the audio unit provide a buffer instead in io_data, but we
  // currently just use our own.
  // TODO(henrika): should error handling be improved?
  AudioBufferList* io_data = &audio_record_buffer_list_;
  result =
      audio_unit_->Render(flags, time_stamp, bus_number, num_frames, io_data);
  if (result != noErr) {
    RTCLogError(@"Failed to render audio.");
    return result;
  }

  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
  // Use the FineAudioBuffer instance to convert between native buffer size
  // and the 10ms buffer size used by WebRTC.
  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
  const size_t size_in_bytes = audio_buffer->mDataByteSize;
  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
               num_frames);
  int8_t* data = static_cast<int8_t*>(audio_buffer->mData);
  fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes,
                                          kFixedPlayoutDelayEstimate,
                                          kFixedRecordDelayEstimate);
  return noErr;
}

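// Render callback for the output bus. It fills |io_data| with decoded audio
// pulled from WebRTC; the FineAudioBuffer bridges between WebRTC's 10 ms
// chunks and the native I/O buffer size.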
OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                          const AudioTimeStamp* time_stamp,
                                          UInt32 bus_number,
                                          UInt32 num_frames,
                                          AudioBufferList* io_data) {
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1u, io_data->mNumberBuffers);
  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
  RTC_DCHECK_EQ(1u, audio_buffer->mNumberChannels);
  // Get pointer to internal audio buffer to which new audio data shall be
  // written.
  const size_t size_in_bytes = audio_buffer->mDataByteSize;
  RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
               num_frames);
  int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData);
  // Produce silence and give audio unit a hint about it if playout is not
  // activated.
  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
    *flags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(destination, 0, size_in_bytes);
    return noErr;
  }
  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) to a preallocated intermediate buffer and
  // copy the result to the audio buffer in the |io_data| destination.
  int8_t* source = playout_audio_buffer_.get();
  fine_audio_buffer_->GetPlayoutData(source);
  memcpy(destination, source, size_in_bytes);
  return noErr;
}

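// The Handle*() methods below run on |thread_| (posted there by the On*()
// callbacks above), so they can safely touch state that is checked by
// |thread_checker_|.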
void AudioDeviceIOS::HandleInterruptionBegin() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  RTCLog(@"Stopping the audio unit due to interruption begin.");
  if (!audio_unit_->Stop()) {
    RTCLogError(@"Failed to stop the audio unit.");
  }
  is_interrupted_ = true;
}

void AudioDeviceIOS::HandleInterruptionEnd() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  RTCLog(@"Starting the audio unit due to interruption end.");
  if (!audio_unit_->Start()) {
    RTCLogError(@"Failed to start the audio unit.");
  }
  is_interrupted_ = false;
}

void AudioDeviceIOS::HandleValidRouteChange() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // Don't do anything if we're interrupted.
  if (is_interrupted_) {
    return;
  }

  // Only restart audio for a valid route change if the session sample rate
  // has changed.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  const double current_sample_rate = playout_parameters_.sample_rate();
  const double session_sample_rate = session.sampleRate;
  if (current_sample_rate != session_sample_rate) {
    RTCLog(@"Route change caused sample rate to change from %f to %f. "
           "Restarting audio unit.", current_sample_rate, session_sample_rate);
    if (!RestartAudioUnit(session_sample_rate)) {
      RTCLogError(@"Audio restart failed.");
    }
  }
}

void AudioDeviceIOS::HandleConfiguredForWebRTC() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // If we're not initialized we don't need to do anything. Audio unit will
  // be initialized on initialization.
  if (!rec_is_initialized_ && !play_is_initialized_)
    return;

  // If we're initialized, we must have an audio unit.
  RTC_DCHECK(audio_unit_);

  // Use configured audio session's settings to set up audio device buffer.
  // TODO(tkchin): Use RTCAudioSessionConfiguration to pick up settings and
  // pass it along.
  SetupAudioBuffersForActiveAudioSession();

  // Initialize the audio unit. This will affect any existing audio playback.
  if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
    RTCLogError(@"Failed to initialize audio unit after configuration.");
    return;
  }

  // If we haven't started playing or recording there's nothing more to do.
  if (!playing_ && !recording_)
    return;

  // We are in a play or record state, start the audio unit.
  if (!audio_unit_->Start()) {
    RTCLogError(@"Failed to start audio unit after configuration.");
    return;
  }
}

void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
  LOGI() << "UpdateAudioDeviceBuffer";
  // AttachAudioBuffer() is called at construction by the main class but check
  // just in case.
  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
  // Inform the audio device buffer (ADB) about the new audio format.
  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
  audio_device_buffer_->SetRecordingSampleRate(
      record_parameters_.sample_rate());
  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
}

void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
  LOGI() << "SetupAudioBuffersForActiveAudioSession";
  // Verify the current values once the audio session has been activated.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  double sample_rate = session.sampleRate;
  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
  RTCLog(@"%@", session);

  // Log a warning message for the case when we are unable to set the preferred
  // hardware sample rate but continue and use the non-ideal sample rate after
  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
  // 16kHz.
  RTCAudioSessionConfiguration* webRTCConfig =
      [RTCAudioSessionConfiguration webRTCConfiguration];
  if (sample_rate != webRTCConfig.sampleRate) {
    LOG(LS_WARNING) << "Unable to set the preferred sample rate";
  }

  // At this stage, we also know the exact IO buffer duration and can add
  // that info to the existing audio parameters where it is converted into
  // number of audio frames.
  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
  // Hence, 128 is the size we expect to see in upcoming render callbacks.
  playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
                            io_buffer_duration);
  RTC_DCHECK(playout_parameters_.is_complete());
  record_parameters_.reset(sample_rate, record_parameters_.channels(),
                           io_buffer_duration);
  RTC_DCHECK(record_parameters_.is_complete());
  LOG(LS_INFO) << " frames per I/O buffer: "
               << playout_parameters_.frames_per_buffer();
  LOG(LS_INFO) << " bytes per I/O buffer: "
               << playout_parameters_.GetBytesPerBuffer();
  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
                record_parameters_.GetBytesPerBuffer());

  // Update the ADB parameters since the sample rate might have changed.
  UpdateAudioDeviceBuffer();

  // Create a modified audio buffer class which allows us to ask for, or
  // deliver, any number of samples (and not only multiples of 10ms) to match
  // the native audio unit buffer size.
  RTC_DCHECK(audio_device_buffer_);
  fine_audio_buffer_.reset(new FineAudioBuffer(
      audio_device_buffer_, playout_parameters_.GetBytesPerBuffer(),
      playout_parameters_.sample_rate()));

  // The extra/temporary playout buffer must be of this size to avoid
  // unnecessary memcpy while caching data between successive callbacks.
  const int required_playout_buffer_size =
      fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
  LOG(LS_INFO) << " required playout buffer size: "
               << required_playout_buffer_size;
  playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]);

  // Allocate AudioBuffers to be used as storage for the received audio.
  // The AudioBufferList structure works as a placeholder for the
  // AudioBuffer structure, which holds a pointer to the actual data buffer
  // in |record_audio_buffer_|. Recorded audio will be rendered into this
  // memory at each input callback when calling AudioUnitRender().
  const int data_byte_size = record_parameters_.GetBytesPerBuffer();
  record_audio_buffer_.reset(new SInt8[data_byte_size]);
  audio_record_buffer_list_.mNumberBuffers = 1;
  AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
  audio_buffer->mNumberChannels = record_parameters_.channels();
  audio_buffer->mDataByteSize = data_byte_size;
  audio_buffer->mData = record_audio_buffer_.get();
}

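// Creates the VoiceProcessingAudioUnit wrapper that is shared by recording
// and playout. The unit is torn down again if its Init() call fails.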
bool AudioDeviceIOS::CreateAudioUnit() {
  RTC_DCHECK(!audio_unit_);

  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
  if (!audio_unit_->Init()) {
    audio_unit_.reset();
    return false;
  }

  return true;
}

bool AudioDeviceIOS::RestartAudioUnit(float sample_rate) {
  RTCLog(@"Restarting audio unit with new sample rate: %f", sample_rate);

  // Stop the active audio unit.
  if (!audio_unit_->Stop()) {
    RTCLogError(@"Failed to stop the audio unit.");
    return false;
  }

  // The stream format is about to be changed and it requires that we first
  // uninitialize it to deallocate its resources.
  if (!audio_unit_->Uninitialize()) {
    RTCLogError(@"Failed to uninitialize the audio unit.");
    return false;
  }

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Initialize the audio unit again with the new sample rate.
  RTC_DCHECK_EQ(playout_parameters_.sample_rate(), sample_rate);
  if (!audio_unit_->Initialize(sample_rate)) {
    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
                sample_rate);
    return false;
  }

  // Restart the audio unit.
  if (!audio_unit_->Start()) {
    RTCLogError(@"Failed to start audio unit.");
    return false;
  }
  RTCLog(@"Successfully restarted audio unit.");

  return true;
}

bool AudioDeviceIOS::InitPlayOrRecord() {
  LOGI() << "InitPlayOrRecord";

  if (!CreateAudioUnit()) {
    return false;
  }

  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  // Subscribe to audio session events.
  [session pushDelegate:audio_session_observer_];

  // Lock the session to make configuration changes.
  [session lockForConfiguration];
  NSError* error = nil;
  if (![session beginWebRTCSession:&error]) {
    [session unlockForConfiguration];
    RTCLogError(@"Failed to begin WebRTC session: %@",
                error.localizedDescription);
    return false;
  }

  // If we are already configured properly, we can initialize the audio unit.
  if (session.isConfiguredForWebRTC) {
    [session unlockForConfiguration];
    SetupAudioBuffersForActiveAudioSession();
    // Audio session has been marked ready for WebRTC so we can initialize the
    // audio unit now.
    audio_unit_->Initialize(playout_parameters_.sample_rate());
    return true;
  }

  // Release the lock.
  [session unlockForConfiguration];

  return true;
}

void AudioDeviceIOS::ShutdownPlayOrRecord() {
  LOGI() << "ShutdownPlayOrRecord";

  // Close and delete the voice-processing I/O unit.
  if (audio_unit_) {
    audio_unit_.reset();
  }

  // Remove audio session notification observers.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session removeDelegate:audio_session_observer_];

  // All I/O should be stopped or paused prior to deactivating the audio
  // session, hence we deactivate as last action.
  [session lockForConfiguration];
  [session endWebRTCSession:nil];
  [session unlockForConfiguration];
}

}  // namespace webrtc