Created October 25, 2018 15:55
audiounittest2.py
from objc_util import *
from ctypes import *
from coreaudioconstants import *
import time
import numpy as np

''' Adapted from https://www.cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html
'''
AudioUnitRenderActionFlags=c_uint32
OSStatus=c_int32
OSType=c_uint32
class SMPTETimeType(c_uint32):
    kSMPTETimeType24 = 0
    kSMPTETimeType25 = 1
    kSMPTETimeType30Drop = 2
    kSMPTETimeType30 = 3
    kSMPTETimeType2997 = 4
    kSMPTETimeType2997Drop = 5
    kSMPTETimeType60 = 6
    kSMPTETimeType5994 = 7
    kSMPTETimeType60Drop = 8
    kSMPTETimeType5994Drop = 9
    kSMPTETimeType50 = 10
    kSMPTETimeType2398 = 11

class SMPTETimeFlags(c_uint32):
    kSMPTETimeUnknown = 0
    kSMPTETimeValid = (1 << 0)
    kSMPTETimeRunning = (1 << 1)
'''
/*!
    @enum     SMPTE State Flags
    @abstract Flags that describe the SMPTE time state.
    @constant kSMPTETimeValid
        The full time is valid.
    @constant kSMPTETimeRunning
        Time is running.
'''
'''
/*!
    @struct   SMPTETime
    @abstract A structure for holding a SMPTE time.
    @field mSubframes
        The number of subframes in the full message.
    @field mSubframeDivisor
        The number of subframes per frame (typically 80).
    @field mCounter
        The total number of messages received.
    @field mType
        The kind of SMPTE time using the SMPTE time type constants.
    @field mFlags
        A set of flags that indicate the SMPTE state.
    @field mHours
        The number of hours in the full message.
    @field mMinutes
        The number of minutes in the full message.
    @field mSeconds
        The number of seconds in the full message.
    @field mFrames
        The number of frames in the full message.
'''
class SMPTETime(Structure):
    _fields_=[('mSubframes',c_int16),
              ('mSubframeDivisor',c_int16),
              ('mCounter',c_uint32),
              ('mType',SMPTETimeType),
              ('mFlags',SMPTETimeFlags),
              ('mHours',c_int16),
              ('mMinutes',c_int16),
              ('mSeconds',c_int16),
              ('mFrames',c_int16)]
'''/*!
    @enum     AudioTimeStamp Flags
    @abstract The flags that indicate which fields in an AudioTimeStamp structure are valid.
    @constant kAudioTimeStampSampleTimeValid
        The sample frame time is valid.
    @constant kAudioTimeStampHostTimeValid
        The host time is valid.
    @constant kAudioTimeStampRateScalarValid
        The rate scalar is valid.
    @constant kAudioTimeStampWordClockTimeValid
        The word clock time is valid.
    @constant kAudioTimeStampSMPTETimeValid
        The SMPTE time is valid.
    @constant kAudioTimeStampSampleHostTimeValid
        The sample frame time and the host time are valid.
'''
class AudioTimeStampFlags(c_uint32):
    kAudioTimeStampNothingValid = 0
    kAudioTimeStampSampleTimeValid = (1 << 0)
    kAudioTimeStampHostTimeValid = (1 << 1)
    kAudioTimeStampRateScalarValid = (1 << 2)
    kAudioTimeStampWordClockTimeValid = (1 << 3)
    kAudioTimeStampSMPTETimeValid = (1 << 4)
    kAudioTimeStampSampleHostTimeValid = (kAudioTimeStampSampleTimeValid | kAudioTimeStampHostTimeValid)
'''
/*!
    @struct   AudioTimeStamp
    @abstract A structure that holds different representations of the same point in time.
    @field mSampleTime
        The absolute sample frame time.
    @field mHostTime
        The host machine's time base, mach_absolute_time.
    @field mRateScalar
        The ratio of actual host ticks per sample frame to the nominal host ticks
        per sample frame.
    @field mWordClockTime
        The word clock time.
    @field mSMPTETime
        The SMPTE time.
    @field mFlags
        A set of flags indicating which representations of the time are valid.
    @field mReserved
        Pads the structure out to force an even 8 byte alignment.
'''
class AudioTimeStamp(Structure):
    _fields_=[('mSampleTime',c_double),
              ('mHostTime',c_uint64),   # UInt64 in the C headers
              ('mRateScalar',c_double),
              ('mWordClockTime',c_uint64),
              ('mSMPTETime',SMPTETime),
              ('mFlags',AudioTimeStampFlags),
              ('mReserved',c_uint32)]
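# mFlags says which representations above are meaningful. For example, inside
# a render callback one would check the sample-time bit before trusting
# mSampleTime:
#   if inTimeStamp[0].mFlags.value & AudioTimeStampFlags.kAudioTimeStampSampleTimeValid:
#       ... use inTimeStamp[0].mSampleTime ...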
'''
/*!
    @struct   AudioBuffer
    @abstract A structure to hold a buffer of audio data.
    @field mNumberChannels
        The number of interleaved channels in the buffer.
    @field mDataByteSize
        The number of bytes in the buffer pointed at by mData.
    @field mData
        A pointer to the buffer of audio data.
'''
class AudioBuffer(Structure):
    _fields_=[('mNumberChannels',c_uint32),
              ('mDataByteSize',c_uint32),
              ('mData',c_void_p)]

class AudioBufferList(Structure):
    '''/*!
    @struct   AudioBufferList
    @abstract A variable length array of AudioBuffer structures.
    @field mNumberBuffers
        The number of AudioBuffers in the mBuffers array.
    @field mBuffers
        A variable length array of AudioBuffers.'''
    _fields_=[('mNumberBuffers',c_uint32),
              ('mBuffers',AudioBuffer*1)]
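# AudioBufferList uses the C idiom for a variable-length struct: mBuffers is
# declared with one element, but when mNumberBuffers > 1 the remaining
# AudioBuffers sit contiguously after the first. A sketch of how to reach
# them from ctypes (the helper name is ours, not part of CoreAudio):
def iter_audio_buffers(abl_ptr):
    '''Yield each AudioBuffer in a POINTER(AudioBufferList).'''
    n = abl_ptr[0].mNumberBuffers
    # reinterpret the trailing array with its true length
    bufs = cast(abl_ptr[0].mBuffers, POINTER(AudioBuffer * n)).contents
    for i in range(n):
        yield bufs[i]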
class AudioComponentDescription(Structure):
    _fields_=[
        ('componentType',OSType),
        ('componentSubType',OSType),
        ('componentManufacturer',OSType),
        ('componentFlags',c_uint32),
        ('componentFlagsMask',c_uint32)]

AudioComponentFindNext=c.AudioComponentFindNext
AudioComponentFindNext.argtypes=[c_void_p, POINTER(AudioComponentDescription)]
AudioComponentFindNext.restype=c_void_p

AudioComponentInstanceNew=c.AudioComponentInstanceNew
AudioComponentInstanceNew.argtypes=[c_void_p, c_void_p]
AudioComponentInstanceNew.restype=OSStatus

AudioUnitSetProperty=c.AudioUnitSetProperty
AudioUnitSetProperty.argtypes=[c_void_p, c_uint32, c_uint32, c_uint32, c_void_p, c_uint32]
AudioUnitSetProperty.restype=OSStatus

AudioUnitInitialize=c.AudioUnitInitialize
AudioUnitInitialize.argtypes=[c_void_p]
AudioUnitInitialize.restype=OSStatus

c.AudioOutputUnitStop.argtypes=[c_void_p]
c.AudioUnitUninitialize.argtypes=[c_void_p]
c.AudioComponentInstanceDispose.argtypes=[c_void_p]

AudioOutputUnitStart=c.AudioOutputUnitStart
AudioOutputUnitStart.argtypes=[c_void_p]
AudioOutputUnitStart.restype=OSStatus
def render_callback_prototype(inRefCon: c_void_p,
                              ioActionFlags: POINTER(AudioUnitRenderActionFlags),
                              inTimeStamp: POINTER(AudioTimeStamp),
                              inBusNumber: c_uint32,
                              inNumberFrames: c_uint32,
                              ioData: POINTER(AudioBufferList)) -> c_uint32:
    pass

AURenderCallbackargs=list(render_callback_prototype.__annotations__.values())
AURenderCallback=CFUNCTYPE(AURenderCallbackargs[-1], *AURenderCallbackargs[0:-1])
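# The CFUNCTYPE above is derived from the prototype's annotations: the last
# annotation ('return', c_uint32 ~ OSStatus) is the return type and the rest
# are the argument types, so the Python and C signatures live in one place.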
class AURenderCallbackStruct(Structure):
    _fields_=[('inputProc',AURenderCallback), ('inputProcRefCon',c_void_p)]

class AudioStreamBasicDescription(Structure):
    _fields_ = [
        ("mSampleRate", c_double),
        ("mFormatID", c_uint),
        ("mFormatFlags", c_uint),
        ("mBytesPerPacket", c_uint),
        ("mFramesPerPacket", c_uint),
        ("mBytesPerFrame", c_uint),
        ("mChannelsPerFrame", c_uint),
        ("mBitsPerChannel", c_uint),
        ("mReserved", c_uint),
    ]
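# For linear PCM the ASBD fields are tied together by simple arithmetic:
#   interleaved:     mBytesPerFrame = mChannelsPerFrame * mBitsPerChannel/8
#   non-interleaved: mBytesPerFrame = mBitsPerChannel/8 (one buffer per channel)
#   always:          mBytesPerPacket = mBytesPerFrame * mFramesPerPacket
# A sketch (the helper name is ours) producing a mono float format like the
# one set up in setup_audiounit below:
def make_float32_asbd(sample_rate, channels=1, interleaved=False):
    asbd = AudioStreamBasicDescription()
    asbd.mSampleRate = sample_rate
    asbd.mFormatID = kAudioFormatLinearPCM
    asbd.mFormatFlags = kAudioFormatFlagsNativeFloatPacked
    if not interleaved:
        asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved
    bytes_per_sample = 4  # 32-bit float
    asbd.mBitsPerChannel = 8 * bytes_per_sample
    asbd.mChannelsPerFrame = channels
    asbd.mBytesPerFrame = bytes_per_sample * (channels if interleaved else 1)
    asbd.mFramesPerPacket = 1
    asbd.mBytesPerPacket = asbd.mBytesPerFrame
    return asbd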
class AudioRenderer(object):
    def __init__(self, sampleRate=44100):
        '''Create an audio unit and bind it to this instance.'''
        self.theta={}
        self.time=0
        self.sounds={}
        self.sampleRate=sampleRate
        self.toneUnit=None
        self.perf=(0,0)
        #self.setup_audiounit()

    def render_callback(self,
                        inRefCon: c_void_p,
                        ioActionFlags: POINTER(AudioUnitRenderActionFlags),
                        inTimeStamp: POINTER(AudioTimeStamp),
                        inBusNumber: c_uint32,
                        inNumberFrames: c_uint32,
                        ioData: POINTER(AudioBufferList)) -> c_uint32:
        '''Unpack the ctypes arguments and call render(buffer, numFrames, sampleTime).'''
        try:
            buffer = cast(ioData[0].mBuffers[0].mData, POINTER(c_float*inNumberFrames)).contents
            sampleTime=inTimeStamp[0].mSampleTime
            return self.render(buffer, inNumberFrames, sampleTime)
        except Exception as e:
            print(e)
            return -999

    def render(self, buffer, numFrames, sampleTime):
        '''Override this with a method that fills buffer with numFrames samples.'''
        b=np.ctypeslib.as_array(buffer)
        b.fill(0)
        for g in self.sounds.values():
            b+=g.pop_samples(numFrames)   # mix all active generators
        #memmove(buffer, b.astype(np.float).ctypes.get_data(), sizeof(c_float)*len(b))
        # leftover timing-debug code:
        ''' for frame in range(numFrames):
                buffer[frame]=b[frame]
            if self.sounds:
                tm=(time.perf_counter()-tm)
                print(100.*tm/(numFrames/self.sampleRate))
            self.time=self.time+numFrames/self.sampleRate'''
        return 0
    def setup_audiounit(self):
        defaultOutputDescription=AudioComponentDescription()
        defaultOutputDescription.componentType = kAudioUnitType_Output
        defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO
        defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple
        defaultOutputDescription.componentFlags = 0
        defaultOutputDescription.componentFlagsMask = 0
        defaultOutput=AudioComponentFindNext(None, byref(defaultOutputDescription))

        toneUnit=c_void_p(0)
        err = AudioComponentInstanceNew(defaultOutput, byref(toneUnit))
        if err:
            raise Exception(error_number_to_string(err))

        myinput=AURenderCallbackStruct()
        myinput.inputProc=AURenderCallback(self.render_callback)
        myinput.inputProcRefCon = UIApplication.sharedApplication().ptr
        self.myinput=myinput   # keep a reference so the callback stays alive
        err=AudioUnitSetProperty(toneUnit,
                                 kAudioUnitProperty_SetRenderCallback,
                                 kAudioUnitScope_Input,
                                 0,
                                 byref(myinput),
                                 sizeof(myinput))
        if err:
            raise Exception(error_number_to_string(err))

        # Set the format to 32 bit, single channel, floating point, linear PCM
        streamFormat=AudioStreamBasicDescription()
        streamFormat.mSampleRate = self.sampleRate
        streamFormat.mFormatID = kAudioFormatLinearPCM
        streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved
        streamFormat.mBytesPerPacket = 4
        streamFormat.mFramesPerPacket = 1
        streamFormat.mBytesPerFrame = 4
        streamFormat.mChannelsPerFrame = 1
        streamFormat.mBitsPerChannel = 4*8
        self.streamFormat=streamFormat
        err = AudioUnitSetProperty(toneUnit,
                                   kAudioUnitProperty_StreamFormat,
                                   kAudioUnitScope_Input,
                                   0,
                                   byref(streamFormat),
                                   sizeof(AudioStreamBasicDescription))
        if err:
            raise Exception(error_number_to_string(err))

        err = AudioUnitInitialize(toneUnit)
        if err:
            raise Exception(error_number_to_string(err))
        self.toneUnit=toneUnit
    def start(self):
        '''Start playback.'''
        if not self.toneUnit:
            self.setup_audiounit()
        err = AudioOutputUnitStart(self.toneUnit)
        if err:
            raise Exception(error_number_to_string(err))

    def stop(self):
        '''Tear it down in reverse order.'''
        toneUnit=self.toneUnit
        c.AudioOutputUnitStop(toneUnit)
        c.AudioUnitUninitialize(toneUnit)
        c.AudioComponentInstanceDispose(toneUnit)
        self.toneUnit=None

    def __del__(self):
        if self.toneUnit:
            self.stop()
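# A minimal sketch of the override point documented in render() above:
# subclass AudioRenderer and fill the buffer directly. (SineRenderer is ours,
# not part of this gist's API.)
class SineRenderer(AudioRenderer):
    def __init__(self, freq=440.0, sampleRate=44100):
        AudioRenderer.__init__(self, sampleRate)
        self.freq = freq
        self.phase = 0.0
    def render(self, buffer, numFrames, sampleTime):
        b = np.ctypeslib.as_array(buffer)
        w = 2*np.pi*self.freq/self.sampleRate   # phase increment per sample
        b[:] = 0.2*np.sin(self.phase + w*np.arange(numFrames))
        self.phase = (self.phase + w*numFrames) % (2*np.pi)
        return 0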
import ui

if __name__=='__main__':
    import tonegenerator
    fs=44100
    fu=40
    from collections import deque
    r=AudioRenderer(fs)
    class Theramin(ui.View):
        def __init__(self,*args,**kwargs):
            ui.View.__init__(self,*args,**kwargs)
            self.touches={}
            self.multitouch_enabled=True
            self.gen_queue=deque()
            for i in range(8):
                self.gen_queue.append(tonegenerator.SawToothGenerator(1,440,fs,fu/4))
            self.update_interval=1/fu

        def update(self):
            # generate samples even when a finger is not moving
            t=time.perf_counter()
            for g in r.sounds.values():
                g.buffer_samps(t)
            self.set_needs_display()

        def draw(self):
            for t in self.touches:
                # draw a finger: color tracks frequency, size tracks volume
                touch=self.touches[t]
                g=r.sounds[t]
                a=g.a
                f=g.f
                u=g.underflow
                o=g.overflow
                R=a*150
                ui.set_color(((f%9000)/2500, (f%9000)/10000, (f%14000)/14000))
                ui.Path.oval(touch.location.x-R, touch.location.y-R, 2*R, 2*R).fill()
                ui.draw_string('{}'.format((u,o,(g._ri-g._li)%len(g.buf))), (30,30,0,0))
        def touch_began(self, touch):
            '''Assign and charge up a generator.'''
            self.touches[touch.touch_id]=touch
            g=self.gen_queue.pop()
            g.t=touch.timestamp-1/fu
            f,a=self.touch_to_sound(touch)
            g.update_params(a,f)
            g.buffer_samps(touch.timestamp)
            r.sounds[touch.touch_id]=g
            self.set_needs_display()

        def touch_moved(self,touch):
            if touch.touch_id in self.touches:
                self.touches[touch.touch_id]=touch
                g=r.sounds[touch.touch_id]
                f,a=self.touch_to_sound(touch)
                g.update_params(a,f)
                g.buffer_samps(touch.timestamp)
                self.set_needs_display()

        def touch_ended(self,touch):
            if touch.touch_id in self.touches:
                del self.touches[touch.touch_id]
                g=r.sounds.pop(touch.touch_id)
                g.update_params(0,0)
                g.buffer_samps(touch.timestamp)
                self.gen_queue.append(g)
                self.set_needs_display()

        def touch_to_sound(self,t):
            f=50+4000*t.location.x/self.width
            a=t.location.y/self.height
            return (f,a)

        def will_close(self):
            r.stop()
    v=Theramin(frame=[0,0,576,576], bg_color='white')
    b=ui.Button(frame=[0,0,100,100])
    b.title='Go'

    def toggle(sender):
        if r.toneUnit:
            r.stop()
            sender.title='GO'
        else:
            r.start()
            print(r.toneUnit)
            sender.title='STOP'

    b.action=toggle
    v.add_subview(b)
    v.present('sheet')
    #r.start()
coreaudioconstants.py
kAudioObjectSystemObject = 1
kAudioHardwarePropertyDevices = int.from_bytes(b'dev#', byteorder='big')
kAudioHardwarePropertyDefaultInputDevice = int.from_bytes(b'dIn ', byteorder='big')
kAudioHardwarePropertyDefaultOutputDevice = int.from_bytes(b'dOut', byteorder='big')
kAudioObjectPropertyScopeGlobal = int.from_bytes(b'glob', byteorder='big')
kAudioObjectPropertyScopeInput = int.from_bytes(b'inpt', byteorder='big')
kAudioObjectPropertyScopeOutput = int.from_bytes(b'outp', byteorder='big')
kAudioObjectPropertyScopePlayThrough = int.from_bytes(b'ptru', byteorder='big')
kAudioObjectPropertyName = int.from_bytes(b'lnam', byteorder='big')
kAudioObjectPropertyModelName = int.from_bytes(b'lmod', byteorder='big')
kAudioObjectPropertyManufacturer = int.from_bytes(b'lmak', byteorder='big')
kAudioDevicePropertyNominalSampleRate = int.from_bytes(b'nsrt', byteorder='big')
kAudioDevicePropertyBufferFrameSize = int.from_bytes(b'fsiz', byteorder='big')
kAudioDevicePropertyBufferFrameSizeRange = int.from_bytes(b'fsz#', byteorder='big')
kAudioDevicePropertyUsesVariableBufferFrameSizes = int.from_bytes(b'vfsz', byteorder='big')
kAudioDevicePropertyStreamConfiguration = int.from_bytes(b'slay', byteorder='big')
kCFStringEncodingUTF8 = 0x08000100
kAudioObjectPropertyElementMaster = 0
kAudioUnitType_Output = int.from_bytes(b'auou', byteorder='big')
kAudioUnitManufacturer_Apple = int.from_bytes(b'appl', byteorder='big')
kAudioUnitSubType_GenericOutput = int.from_bytes(b'genr', byteorder='big')
kAudioUnitSubType_HALOutput = int.from_bytes(b'ahal', byteorder='big')
kAudioUnitSubType_DefaultOutput = int.from_bytes(b'def ', byteorder='big')
kAudioUnitSubType_RemoteIO = int.from_bytes(b'rioc', byteorder='big')

# The audio unit can do input from the device as well as output to the
# device. Bus 0 is used for the output side, bus 1 is used to get audio
# input from the device.
outputbus = 0
inputbus = 1
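# kAudioOutputUnitProperty_EnableIO (defined below) is how the input side of
# a RemoteIO unit gets switched on: output on element 0 is enabled by
# default, input on element 1 is disabled. A sketch, assuming
# AudioUnitSetProperty is bound as in audiounittest2.py:
#   from ctypes import c_uint32, byref, sizeof
#   flag = c_uint32(1)
#   err = AudioUnitSetProperty(unit,
#                              kAudioOutputUnitProperty_EnableIO,
#                              kAudioUnitScope_Input,
#                              inputbus,      # element 1 == input
#                              byref(flag),
#                              sizeof(flag))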
def error_number_to_string(num):
    if num == kAudioUnitErr_InvalidProperty:
        return "The property is not supported"
    elif num == kAudioUnitErr_InvalidParameter:
        return "The parameter is not supported"
    elif num == kAudioUnitErr_InvalidElement:
        return "The specified element is not valid"
    elif num == kAudioUnitErr_NoConnection:
        return "There is no connection (generally an audio unit is asked to render but it has" \
               " no input from which to gather data)"
    elif num == kAudioUnitErr_FailedInitialization:
        return "The audio unit is unable to be initialized"
    elif num == kAudioUnitErr_TooManyFramesToProcess:
        return "When an audio unit is initialized it has a value which specifies the max" \
               " number of frames it will be asked to render at any given time. If an audio" \
               " unit is asked to render more than this, this error is returned."
    elif num == kAudioUnitErr_InvalidFile:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file is invalid (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_UnknownFileType:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file is invalid (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_FileNotSpecified:
        return "If an audio unit uses external files as a data source, this error is returned" \
               " if a file hasn't been set on it (Apple's DLS synth returns this error)"
    elif num == kAudioUnitErr_FormatNotSupported:
        return "Returned if an input or output format is not supported"
    elif num == kAudioUnitErr_Uninitialized:
        return "Returned if an operation requires an audio unit to be initialized and it is not."
    elif num == kAudioUnitErr_InvalidScope:
        return "The specified scope is invalid"
    elif num == kAudioUnitErr_PropertyNotWritable:
        return "The property cannot be written"
    elif num == kAudioUnitErr_CannotDoInCurrentContext:
        return "Returned when an audio unit is in a state where it can't perform the requested" \
               " action now - but it could later. It's usually used to guard a render operation" \
               " when a reconfiguration of its internal state is being performed."
    elif num == kAudioUnitErr_InvalidPropertyValue:
        return "The property is valid, but the value of the property being provided is not"
    elif num == kAudioUnitErr_PropertyNotInUse:
        return "Returned when a property is valid, but it hasn't been set to a valid value at this time."
    elif num == kAudioUnitErr_Initialized:
        return "Indicates the operation cannot be performed because the audio unit is initialized."
    elif num == kAudioUnitErr_InvalidOfflineRender:
        return "Used to indicate that the offline render operation is invalid. For instance," \
               " when the audio unit needs to be pre-flighted, but it hasn't been."
    elif num == kAudioUnitErr_Unauthorized:
        return "Returned by either Open or Initialize, this error is used to indicate that the" \
               " audio unit is not authorised, that it cannot be used. A host can then present" \
               " a UI to notify the user the audio unit is not able to be used in its current state."
    elif num == kAudioComponentErr_InstanceInvalidated:
        return "The component instance's implementation is not available, most likely because the" \
               " process that published it is no longer running"
    else:
        return "error number {}".format(num)
kAudioUnitErr_InvalidProperty = -10879
kAudioUnitErr_InvalidParameter = -10878
kAudioUnitErr_InvalidElement = -10877
kAudioUnitErr_NoConnection = -10876
kAudioUnitErr_FailedInitialization = -10875
kAudioUnitErr_TooManyFramesToProcess = -10874
kAudioUnitErr_InvalidFile = -10871
kAudioUnitErr_UnknownFileType = -10870
kAudioUnitErr_FileNotSpecified = -10869
kAudioUnitErr_FormatNotSupported = -10868
kAudioUnitErr_Uninitialized = -10867
kAudioUnitErr_InvalidScope = -10866
kAudioUnitErr_PropertyNotWritable = -10865
kAudioUnitErr_CannotDoInCurrentContext = -10863
kAudioUnitErr_InvalidPropertyValue = -10851
kAudioUnitErr_PropertyNotInUse = -10850
kAudioUnitErr_Initialized = -10849
kAudioUnitErr_InvalidOfflineRender = -10848
kAudioUnitErr_Unauthorized = -10847
kAudioComponentErr_InstanceInvalidated = -66749
kAudioUnitErr_RenderTimeout = -66745

kAudioOutputUnitProperty_CurrentDevice = 2000
kAudioOutputUnitProperty_EnableIO = 2003   # scope output, element 0 == output,
kAudioOutputUnitProperty_HasIO = 2006      # scope input, element 1 == input
kAudioOutputUnitProperty_IsRunning = 2001
kAudioOutputUnitProperty_ChannelMap = 2002

kAudioFormatLinearPCM = int.from_bytes(b'lpcm', byteorder='big')
kAudioFormatFlagIsFloat = 0x1
kAudioFormatFlagIsNonInterleaved = (1 << 5)
kAudioFormatFlagsNativeEndian = 0
kAudioFormatFlagIsPacked = (1 << 3)
kAudioFormatFlagsNativeFloatPacked = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked

kAudioUnitProperty_StreamFormat = 8
kAudioUnitProperty_CPULoad = 6
kAudioUnitProperty_Latency = 12
kAudioUnitProperty_SupportedNumChannels = 13
kAudioUnitProperty_MaximumFramesPerSlice = 14
kAudioUnitProperty_SetRenderCallback = 23
kAudioOutputUnitProperty_SetInputCallback = 2005
kAudioUnitProperty_SampleRate = 2
kAudioUnitProperty_ContextName = 25
kAudioUnitProperty_ElementName = 30
kAudioUnitProperty_NickName = 54

kAudioUnitScope_Global = 0  # The context for audio unit characteristics that apply to the audio unit as a whole
kAudioUnitScope_Input = 1   # The context for audio data coming into an audio unit
kAudioUnitScope_Output = 2  # The context for audio data leaving an audio unit
tonegenerator.py
''' Experiment: generate audio within update(), at 60Hz, filling a circular buffer. Then, during the render callback, simply pop the requested number of frames out.
The buffer filler tries to fill dt*sampleRate frames: if the buffer is getting empty, fill more; if it is getting full, fill less.
'''
import numpy as np

class ToneGenerator(object):
    '''Represents a sine wave generator with internal buffering.
    Periodically (e.g. from update or touch_moved), call update_params(a,f)
    and buffer_samps(t); this generates samples to populate the segment of
    time since the last update.
    Assumes updates at least as frequent as fu; fs is the sampling frequency,
    f the tone frequency, a the amplitude.
    Subclass to override synthesis; buffering is handled by the superclass.
    '''
    def __init__(self,a,f,fs,fu):
        self.buf=np.zeros(int(np.ceil(2*fs/fu)))  # ring buffer ~2 update periods long
        self._li=0  # left (read) index
        self._ri=0  # right (write) index
        self.a=a
        self.w=2*np.pi*f  # use angular frequency for speed later
        self.fs=fs
        self.q=0    # current phase
        self.t=0    # time up to which samples have been generated
        self.underflow=0
        self.overflow=0
    def update_params(self,a,f):
        '''Update the amplitude and frequency.'''
        self.a=a
        self.f=f

    @property
    def f(self):
        return self.w/2/np.pi

    @f.setter
    def f(self,fnew):
        '''For smooth phase, advance one sample at the average of the old and
        new frequency to compute the new phase.'''
        self.q+=np.pi*(self.f+fnew)/self.fs
        self.w=2*np.pi*fnew

    @property
    def a(self):
        return self._a

    @a.setter
    def a(self,value):
        self._a=value

    def render_samps(self,q):
        '''Override in subclasses: given phase (np array, radians), output an np array of samples.'''
        return self.a*np.sin(q)
    def buffer_samps(self,t):
        '''Buffer samples up to time t on the right side of the queue.
        If the buffer is full, stop filling and count an overflow.'''
        Nsamps=int(np.ceil((t-self.t)*self.fs))
        # write into buf starting at ri, up to li-1 (full) or len(buf),
        # then wrap around and continue from 0
        ri=self._ri
        li=self._li
        buf=self.buf
        # Nactual is the lesser of Nsamps and the free space in the ring
        Nactual=int(min(len(buf)-1-(ri-li)%len(buf), Nsamps))
        if Nactual>0:
            ts=(1+np.arange(Nactual,dtype=np.float64))/self.fs
            q=np.mod(self.w*ts+self.q, np.pi*2)
            y=self.render_samps(q)
            self.t=ts[-1]+self.t
            self.q=q[-1]
            # fill from ri to len(buf), then 0 to (ri+Nactual) % len(buf)
            id1=min(len(buf), ri+Nactual)
            id2=max(ri+Nactual, len(buf))%len(buf)
            buf[ri:id1]=y[0:(id1-ri)]
            buf[0:id2]=y[(id1-ri):]
            self._ri=(ri+Nactual)%len(buf)
        if Nactual<Nsamps:
            self.overflow+=1
    def pop_samples(self,N):
        '''Pop N samples from the left side of the buffer, zero-padding
        (and counting an underflow) if fewer are available.'''
        N=int(N)
        ri=self._ri
        li=self._li
        buf=self.buf
        Nactual=int(min(N, len(buf)-1, (ri-li)%len(buf)))
        id1=min(li+Nactual, len(buf))
        id2=max(li+Nactual, len(buf))%len(buf)
        y=np.concatenate([buf[li:id1], buf[0:id2], np.zeros(N-Nactual)])
        self._li=(li+Nactual)%len(buf)
        if Nactual<N:
            self.underflow+=1
        return y
class SawToothGenerator(ToneGenerator):
    def render_samps(self,q):
        return self.a*(np.mod(q,2.*np.pi)-np.pi)/np.pi  # ramps -1..1

class TriangleWaveGenerator(ToneGenerator):
    def render_samps(self,q):
        # ramps -1..1, then 1..-1
        return self.a*(np.mod(q,np.pi)-np.pi/2)/(np.pi/2)*np.sign(np.mod(q,2*np.pi)-np.pi)
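# render_samps receives an array of phases in radians and returns samples, so
# a new timbre is one method. A sketch (SquareWaveGenerator is ours, not part
# of the original gist):
class SquareWaveGenerator(ToneGenerator):
    def render_samps(self,q):
        # +a for the first half of each cycle, -a for the second
        return self.a*np.sign(np.pi-np.mod(q,2*np.pi))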
def filterGenerator(g,a,b):
    '''Return a generator whose render_samps is patched to pass its output
    through an FIR filter with coefficients a, b (filtering is still a stub).'''
    old_render=g.render_samps  # bound method of g
    # todo: add filter state variables here
    def render_samps(q):
        y=old_render(q)
        # todo: apply the filter to y using a, b and the stored state
        yf=y
        return yf
    g.render_samps=render_samps
    return g
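# Usage sketch of the fill/pop cycle described in the module docstring: the
# UI side buffers ahead of wall-clock time, the render callback pops frames.
if __name__=='__main__':
    fs, fu = 44100, 60.0
    g = ToneGenerator(0.5, 440, fs, fu)
    g.buffer_samps(1.0/fu)       # pretend one update tick has elapsed
    chunk = g.pop_samples(256)   # what a render callback would request
    print(len(chunk), g.underflow, g.overflow)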