Skip to content

Instantly share code, notes, and snippets.

@dneprDroid
Last active March 27, 2020 08:25
Show Gist options
  • Select an option

  • Save dneprDroid/7b7f396db64d2291975da00b18ddb657 to your computer and use it in GitHub Desktop.

Select an option

Save dneprDroid/7b7f396db64d2291975da00b18ddb657 to your computer and use it in GitHub Desktop.
Audiotest AudioUnit iOS — plays a sine-wave tone
#import "Audiotest.h"
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
// Minimal AudioUnit (RemoteIO) sine-tone generator.
// NOTE(review): this interface is declared inline even though "Audiotest.h"
// is imported above — presumably a gist paste artifact; verify against the header.
@interface Audiotest : NSObject {
@public
// Tone frequency in Hz. NOTE(review): RenderTone hard-codes 440 Hz and does
// not read this ivar as written — confirm intended behavior.
double frequency;
// Output sample rate in Hz (set to 44100 in -init; fed to the stream format).
double sampleRate;
// Oscillator phase accumulator, in radians.
double theta;
// The RemoteIO output unit; created lazily by -create, torn down by -play.
AudioComponentInstance toneUnit;
}
// Toggles playback: starts the tone if stopped, stops and disposes it if running.
-(void)play;
@end
// Render callback: fills buffer 0 of io_data with a mono sine tone and
// mirrors it into any additional non-interleaved channel buffers.
//
// inRefCon carries the Audiotest instance (installed in -create), whose
// @public ivars provide the sample rate and persistent phase. The original
// version ignored inRefCon, kept phase in a file-local static (not reentrant,
// and contradicting its own "writing the current phase back to inRefCon"
// comment) and hard-coded the 44100 Hz sample rate.
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 num_frames,
AudioBufferList *io_data)
{
Audiotest *tone = (__bridge Audiotest *)inRefCon;
Float32 * outputBuffer = (Float32 *)io_data->mBuffers[0].mData;
const double frequency = 440.;  // fixed pitch, as in the original
// Use the configured sample rate instead of a hard-coded 44100.
const double phaseStep = (frequency / tone->sampleRate) * (M_PI * 2.);
double phase = tone->theta;
// UInt32 loop index avoids the signed/unsigned comparison of the original.
for(UInt32 i = 0; i < num_frames; i++) {
outputBuffer[i] = (Float32)sin(phase);
phase += phaseStep;
// Keep the accumulator bounded so precision doesn't degrade over time.
if (phase > M_PI * 2.) phase -= M_PI * 2.;
}
// If we were doing stereo (or more), this copies our sine wave samples
// to all of the remaining channels
for(UInt32 i = 1; i < io_data->mNumberBuffers; i++) {
memcpy(io_data->mBuffers[i].mData, outputBuffer,
io_data->mBuffers[i].mDataByteSize);
}
// Write the current phase back so the next render call continues smoothly.
tone->theta = phase;
return noErr;
}
@implementation Audiotest
// Initializes the generator with its default tone parameters.
- (instancetype)init
{
if ((self = [super init])) {
sampleRate = 44100;  // matches the rate used by the render callback
frequency = 2;
}
return self;
}
// Builds the RemoteIO output unit, installs RenderTone as its render
// callback, and configures a 32-bit float, mono, non-interleaved PCM stream.
// Asserts (debug builds) on any failure along the way.
-(void)create {
// Zero-initialize so no field (e.g. padding) carries garbage.
AudioComponentDescription defaultOutputDescription = {0};
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output.
// Check the status code (not just the out-pointer), and cast OSStatus
// (SInt32) to long so it matches the %ld format specifier.
OSStatus err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(err == noErr && toneUnit, @"Error creating unit: %ld", (long)err);
// Set our tone rendering function on the unit; pass self as refCon so the
// callback can reach this instance's state.
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = (__bridge void * _Nullable)(self);
err = AudioUnitSetProperty(toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %ld", (long)err);
// Set the format to 32 bit, single channel, floating point, linear PCM.
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
// Zero-initialize: the original left mReserved (and any padding)
// uninitialized, which some AU versions reject.
AudioStreamBasicDescription streamFormat = {0};
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %ld", (long)err);
}
// Toggles playback: if the unit exists it is stopped and disposed;
// otherwise it is created, initialized, and started.
-(void)play {
// Configure and activate the audio session; the original discarded both
// errors via error:nil, hiding any session failure.
NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
if (![session setCategory:AVAudioSessionCategoryPlayback
withOptions:AVAudioSessionCategoryOptionMixWithOthers | AVAudioSessionCategoryOptionDuckOthers
error:&sessionError]) {
NSLog(@"Error setting audio session category: %@", sessionError);
}
if (![session setActive:YES error:&sessionError]) {
NSLog(@"Error activating audio session: %@", sessionError);
}
if (toneUnit)
{
// Tear down in the reverse order of setup.
AudioOutputUnitStop(toneUnit);
AudioUnitUninitialize(toneUnit);
AudioComponentInstanceDispose(toneUnit);
// AudioComponentInstance is a plain C pointer, not an ObjC object,
// so NULL (not nil) is the correct sentinel.
toneUnit = NULL;
}
else
{
[self create];
// Stop changing parameters on the unit
OSStatus err = AudioUnitInitialize(toneUnit);
NSAssert1(err == noErr, @"Error initializing unit: %ld", (long)err);
// Start playback
err = AudioOutputUnitStart(toneUnit);
NSAssert1(err == noErr, @"Error starting unit: %ld", (long)err);
}
}
@end
// Debug hook: fills the playout buffer with a 440 Hz sine tone instead of
// decoded audio (gist experiment — presumably replaces the real playout
// path; confirm against upstream before reuse).
void FineAudioBuffer::GetPlayoutData(rtc::ArrayView<int16_t> audio_buffer,
                                     int playout_delay_ms) {
  RTC_DCHECK(IsReadyForPlayout());
  // Cast matches %lu (the original paired %lu with a signed (long) cast).
  printf("[FineAudioBuffer::GetPlayoutData] called (size=%lu).......\n",
         (unsigned long)playout_buffer_.size());
  static double currentPhase = 0;
  const double frequency = 440.;
  const double phaseStep = (frequency / 44100.) * (M_PI * 2.);
  // Write int16 PCM directly. The original reinterpret_cast the int16_t
  // buffer to float and wrote audio_buffer.size() 4-byte floats into a
  // 2-bytes-per-sample buffer — a 2x heap overrun — and the [-1, 1] float
  // values were meaningless as int16 samples anyway.
  int16_t *outputBuffer = audio_buffer.data();
  for (size_t i = 0; i < audio_buffer.size(); i++) {
    outputBuffer[i] = static_cast<int16_t>(sin(currentPhase) * 32767.0);
    currentPhase += phaseStep;
  }
}
@dneprDroid
Copy link
Copy Markdown
Author

dneprDroid commented Mar 27, 2020

audio/audio_transport_impl.cc

@dneprDroid
Copy link
Copy Markdown
Author

media/engine/webrtc_voice_engine.cc

@dneprDroid
Copy link
Copy Markdown
Author

pc/remote_audio_source.cc

@dneprDroid
Copy link
Copy Markdown
Author

dneprDroid commented Mar 27, 2020

FineAudioBuffer::AttachAudioBuffer

AudioDeviceModuleIOS::AttachAudioBuffer() {

modules/audio_device/audio_device_buffer.cc

// Installs the AudioTransport callback used to pull/push audio data.
// Rejected (returns -1) while playout or recording is active.
int32_t AudioDeviceBuffer::RegisterAudioCallback(
    AudioTransport* audio_callback) {
  RTC_DCHECK_RUN_ON(&main_thread_checker_);
  // Restored __FUNCTION__: the gist's markdown rendering stripped the
  // double underscores, leaving a bare (undeclared) FUNCTION token.
  RTC_LOG(INFO) << __FUNCTION__;
  if (playing_ || recording_) {
    RTC_LOG(LS_ERROR) << "Failed to set audio transport since media was active";
    return -1;
  }
  audio_transport_cb_ = audio_callback;
  return 0;
}

audio/audio_transport_impl.cc

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment