import AVFoundation
import Foundation

// The single FM synthesizer instance.
private let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

public class FMSynthesizer {
    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    private let kInFlightAudioBuffers: Int = 2
    // The number of audio samples per buffer. A lower value reduces latency for
    // changes but requires more processing but increases the risk of being unable
    // to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    private let kSamplesPerBuffer: AVAudioFrameCount = 1024
    // The audio engine manages the sound system.
    private let audioEngine: AVAudioEngine = AVAudioEngine()
    // The player node schedules the playback of the audio buffers.
    private let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()
    // Use standard non-interleaved PCM audio.
    private let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)
    // A circular queue of audio buffers.
    private let audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()
    // The index of the next buffer to fill.
    private var bufferIndex: Int = 0
    // The dispatch queue to render audio samples.
    private let audioQueue: dispatch_queue_t = dispatch_queue_create("FMSynthesizerQueue", DISPATCH_QUEUE_SERIAL)
    // A semaphore to gate the number of buffers processed.
    private let audioSemaphore: dispatch_semaphore_t

    public class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }

    private init() {
        // init the semaphore
        audioSemaphore = dispatch_semaphore_create(kInFlightAudioBuffers)
        // Create a pool of audio buffers.
        audioBuffers = [AVAudioPCMBuffer](count: 2, repeatedValue: AVAudioPCMBuffer(PCMFormat: audioFormat, frameCapacity: UInt32(kSamplesPerBuffer)))
        // Attach and connect the player node.
        audioEngine.attachNode(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }
        NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: AVAudioEngineConfigurationChangeNotification, object: audioEngine)
    }

    public func play(carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * M_PI / audioFormat.sampleRate)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        dispatch_async(audioQueue) {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                dispatch_semaphore_wait(self.audioSemaphore, DISPATCH_TIME_FOREVER)
                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData[0]
                let rightChannel = audioBuffer.floatChannelData[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel[sampleIndex] = sample
                    rightChannel[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer
                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    dispatch_semaphore_signal(self.audioSemaphore)
                    return
                }
                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }
        playerNode.pan = 0.8
        playerNode.play()
    }

    @objc private func audioEngineConfigurationChange(notification: NSNotification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }
}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)
I got it working, but it's probably not pretty and has warnings:
import AVFoundation
import Foundation

// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {
    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    var kInFlightAudioBuffers: Int = 2
    // The number of audio samples per buffer. A lower value reduces latency for
    // changes but requires more processing but increases the risk of being unable
    // to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    let kSamplesPerBuffer: AVAudioFrameCount = 1024
    // The audio engine manages the sound system.
    let audioEngine: AVAudioEngine = AVAudioEngine()
    // The player node schedules the playback of the audio buffers.
    let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()
    // Use standard non-interleaved PCM audio.
    let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 1)
    // A circular queue of audio buffers.
    var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()
    // The index of the next buffer to fill.
    var bufferIndex: Int = 0
    // The dispatch queue to render audio samples.
    let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])
    // A semaphore to gate the number of buffers processed.
    let audioSemaphore: DispatchSemaphore

    class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }

    public init() {
        // init the semaphore
        audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
        // Create a pool of audio buffers.
        audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(kSamplesPerBuffer)), count: 2)
        // Attach and connect the player node.
        audioEngine.attach(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }
        NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
    }

    func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * M_PI / audioFormat.sampleRate)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        audioQueue.async {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData?[0]
                let rightChannel = audioBuffer.floatChannelData?[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel?[sampleIndex] = sample
                    rightChannel?[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer
                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    self.audioSemaphore.signal()
                    return
                }
                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }
        playerNode.pan = 0.8
        playerNode.play()
    }

    @objc func audioEngineConfigurationChange(_ notification: Notification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }
}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)
Should be (the fill loop writes both floatChannelData[0] and floatChannelData[1], so the format needs two channels):
let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)
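For what it's worth, a more defensive variant of the per-sample loop inside play(...) (a sketch of my own, not from the gist) fills whichever channels the buffer actually provides, so a mono format can never lead to an out-of-bounds channel pointer:

if let channels = audioBuffer.floatChannelData {
    // Fill every channel the buffer really has (1 for mono, 2 for stereo)
    // instead of assuming a second channel pointer exists.
    let channelCount = Int(audioBuffer.format.channelCount)
    for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
        let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
        for channel in 0 ..< channelCount {
            channels[channel][sampleIndex] = sample
        }
        sampleTime += 1.0
    }
}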
Swift 4:
import AVFoundation
import Foundation

// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {
    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    var kInFlightAudioBuffers: Int = 2
    // The number of audio samples per buffer. A lower value reduces latency for
    // changes but requires more processing but increases the risk of being unable
    // to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    let kSamplesPerBuffer: AVAudioFrameCount = 1024
    // The audio engine manages the sound system.
    let audioEngine: AVAudioEngine = AVAudioEngine()
    // The player node schedules the playback of the audio buffers.
    let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()
    // Use standard non-interleaved PCM audio.
    let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 1)
    // A circular queue of audio buffers.
    var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()
    // The index of the next buffer to fill.
    var bufferIndex: Int = 0
    // The dispatch queue to render audio samples.
    let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])
    // A semaphore to gate the number of buffers processed.
    let audioSemaphore: DispatchSemaphore

    class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }

    public init() {
        // init the semaphore
        audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
        // Create a pool of audio buffers.
        audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: UInt32(kSamplesPerBuffer))!, count: 2)
        // Attach and connect the player node.
        audioEngine.attach(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }
        NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
    }

    func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * M_PI / (audioFormat?.sampleRate)!)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        audioQueue.async {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData?[0]
                let rightChannel = audioBuffer.floatChannelData?[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel?[sampleIndex] = sample
                    rightChannel?[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer
                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    self.audioSemaphore.signal()
                    return
                }
                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }
        playerNode.pan = 0.8
        playerNode.play()
    }

    @objc func audioEngineConfigurationChange(_ notification: Notification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }
}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)
This line:
// Create a pool of audio buffers.
audioBuffers = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat!,
frameCapacity: UInt32(kSamplesPerBuffer))!,
count: 2)
is not doing what we hope here. It creates a single AVAudioPCMBuffer
and references it twice in the array instead of creating two separate buffers.
The following code will do what we expect.
var audioBuffers = [AVAudioPCMBuffer]()
for _ in 0..<kInFlightAudioBuffers {
    audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                         frameCapacity: kSamplesPerBuffer)!)
}
The var at the start creates a local variable instead of modifying the existing audioBuffers array.
audioBuffers = [AVAudioPCMBuffer]()
for _ in 0..<kInFlightAudioBuffers {
    audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                         frameCapacity: kSamplesPerBuffer)!)
}
Probably took me longer than it should have to realise why it wasn't working, haha
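For anyone wondering why repeating:count: bites here: AVAudioPCMBuffer is a class, so the array initializer copies the reference rather than the buffer. A quick check (a standalone sketch reusing the audioFormat and kSamplesPerBuffer values from the code above) makes the difference visible:

// One buffer, referenced twice: both elements are the same object.
let shared = [AVAudioPCMBuffer](repeating: AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                                            frameCapacity: kSamplesPerBuffer)!,
                                count: 2)
print(shared[0] === shared[1])      // true

// Two distinct buffers: each iteration creates its own instance.
let separate = (0..<2).map { _ in AVAudioPCMBuffer(pcmFormat: audioFormat!,
                                                   frameCapacity: kSamplesPerBuffer)! }
print(separate[0] === separate[1])  // false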
Hi guys, any clue why the audio "cracks" when I turn the screen off?
I have the Audio background mode active and I do set up the session.
Hmm, I get stuttering audio when I try to do this in a playground. Any idea why?
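Not sure about the stutter itself, but one thing worth ruling out in a playground (this is an assumption on my part, not a diagnosis): the rendering loop runs on a background queue, and a playground page stops executing after its last top-level statement unless you explicitly keep it alive. A minimal sketch:

import PlaygroundSupport

// Keep the playground process running so the AVAudioEngine and the
// background rendering queue are not torn down after the last statement.
PlaygroundPage.current.needsIndefiniteExecution = true

FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)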
This works in Swift 5 but I get this warning on the line:
self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
"Thread running at QOS_CLASS_USER_INITIATED waiting on a lower QoS thread running at QOS_CLASS_DEFAULT. Investigate ways to avoid priority inversions"
/////////////////////
import AVFoundation
import Foundation

// The single FM synthesizer instance.
let gFMSynthesizer: FMSynthesizer = FMSynthesizer()

class FMSynthesizer {
    // The maximum number of audio buffers in flight. Setting to two allows one
    // buffer to be played while the next is being written.
    var kInFlightAudioBuffers: Int = 2
    // The number of audio samples per buffer. A lower value reduces latency for
    // changes but requires more processing but increases the risk of being unable
    // to fill the buffers in time. A setting of 1024 represents about 23ms of
    // samples.
    let kSamplesPerBuffer: AVAudioFrameCount = 1024
    // The audio engine manages the sound system.
    let audioEngine: AVAudioEngine = AVAudioEngine()
    // The player node schedules the playback of the audio buffers.
    let playerNode: AVAudioPlayerNode = AVAudioPlayerNode()
    // Use standard non-interleaved PCM audio.
    let audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 2)
    // A circular queue of audio buffers.
    var audioBuffers: [AVAudioPCMBuffer] = [AVAudioPCMBuffer]()
    // The index of the next buffer to fill.
    var bufferIndex: Int = 0
    // The dispatch queue to render audio samples.
    let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", attributes: [])
    // A semaphore to gate the number of buffers processed.
    let audioSemaphore: DispatchSemaphore

    class func sharedSynth() -> FMSynthesizer {
        return gFMSynthesizer
    }

    public init() {
        // init the semaphore
        audioSemaphore = DispatchSemaphore(value: kInFlightAudioBuffers)
        audioBuffers = [AVAudioPCMBuffer]()
        for _ in 0..<kInFlightAudioBuffers {
            audioBuffers.append(AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: kSamplesPerBuffer)!)
        }
        // Attach and connect the player node.
        audioEngine.attach(playerNode)
        audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
        do {
            try audioEngine.start()
        } catch {
            print("AudioEngine didn't start")
        }
        NotificationCenter.default.addObserver(self, selector: #selector(FMSynthesizer.audioEngineConfigurationChange(_:)), name: NSNotification.Name.AVAudioEngineConfigurationChange, object: audioEngine)
    }

    func play(_ carrierFrequency: Float32, modulatorFrequency: Float32, modulatorAmplitude: Float32) {
        let unitVelocity = Float32(2.0 * Double.pi / (audioFormat?.sampleRate)!)
        let carrierVelocity = carrierFrequency * unitVelocity
        let modulatorVelocity = modulatorFrequency * unitVelocity
        audioQueue.async {
            var sampleTime: Float32 = 0
            while true {
                // Wait for a buffer to become available.
                self.audioSemaphore.wait(timeout: DispatchTime.distantFuture)
                // Fill the buffer with new samples.
                let audioBuffer = self.audioBuffers[self.bufferIndex]
                let leftChannel = audioBuffer.floatChannelData?[0]
                let rightChannel = audioBuffer.floatChannelData?[1]
                for sampleIndex in 0 ..< Int(self.kSamplesPerBuffer) {
                    let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
                    leftChannel?[sampleIndex] = sample
                    rightChannel?[sampleIndex] = sample
                    sampleTime = sampleTime + 1.0
                }
                audioBuffer.frameLength = self.kSamplesPerBuffer
                // Schedule the buffer for playback and release it for reuse after
                // playback has finished.
                self.playerNode.scheduleBuffer(audioBuffer) {
                    self.audioSemaphore.signal()
                    return
                }
                self.bufferIndex = (self.bufferIndex + 1) % self.audioBuffers.count
            }
        }
        playerNode.pan = 0.8
        playerNode.play()
    }

    @objc func audioEngineConfigurationChange(_ notification: Notification) -> Void {
        NSLog("Audio engine configuration change: \(notification)")
    }
}

// Play a bell sound:
// FMSynthesizer.sharedSynth().play(440.0, modulatorFrequency: 679.0, modulatorAmplitude: 0.8)
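About the priority-inversion warning quoted above: the block submitted with audioQueue.async inherits the caller's (user-initiated) QoS, while the scheduleBuffer completion handler that signals the semaphore runs at default QoS, so the waiter outranks the signaller. One possible mitigation (a sketch; whether it silences the warning in every setup is an assumption) is to give the rendering queue an explicit, non-elevated QoS so both sides run at compatible priorities:

// Create the rendering queue with an explicit QoS instead of letting submitted
// blocks inherit the (higher) QoS of whatever thread calls play(...).
let audioQueue: DispatchQueue = DispatchQueue(label: "FMSynthesizerQueue", qos: .default)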
@fcesc Thank you so much, this approach works fine for me and I can play a continuous sound with that code.
Can you tell me how I can play a sequence of AVAudioPCMBuffers with this approach, please? Each AVAudioPCMBuffer also has a size of 1024.
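A sketch of one way to do that (assuming the buffers are already filled and use the player's format): AVAudioPlayerNode plays scheduled buffers back to back in the order they were scheduled, so you can queue them all and start the node once.

// buffers: a hypothetical [AVAudioPCMBuffer], each 1024 frames long, and
// player: an AVAudioPlayerNode already attached to a running AVAudioEngine.
for buffer in buffers {
    player.scheduleBuffer(buffer, completionHandler: nil)
}
player.play()   // the buffers play back to back, in scheduling order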
The distilled essence of all the above code, in Swift 6:
import AVFoundation
import Foundation

let sampleRate = 44100.0
// Two channels, because the fill loop below writes both the left and the right channel.
let channels: UInt32 = 2
let carrierFrequency: Float32 = 440.0
let modulatorFrequency: Float32 = 679.0
let modulatorAmplitude: Float32 = 0.8
let unitVelocity = Float32(2.0 * .pi / sampleRate)
let carrierVelocity = carrierFrequency * unitVelocity
let modulatorVelocity = modulatorFrequency * unitVelocity
let samplesPerBuffer: AVAudioFrameCount = 1024 * 16
let engine = AVAudioEngine()
let player = AVAudioPlayerNode()
let format = AVAudioFormat(
    standardFormatWithSampleRate: sampleRate,
    channels: channels
)

do {
    engine.attach(player)
    engine.connect(player, to: engine.mainMixerNode, format: format)
    try engine.start()

    var sampleTime: Float32 = 0
    if let audioBuffer = AVAudioPCMBuffer(pcmFormat: format!, frameCapacity: samplesPerBuffer) {
        let channelL = audioBuffer.floatChannelData?[0]
        let channelR = audioBuffer.floatChannelData?[1]
        for sampleIndex in 0..<Int(samplesPerBuffer) {
            let sample = sin(carrierVelocity * sampleTime + modulatorAmplitude * sin(modulatorVelocity * sampleTime))
            channelL?[sampleIndex] = sample
            channelR?[sampleIndex] = sample
            sampleTime += 1.0
        }
        audioBuffer.frameLength = samplesPerBuffer
        player.scheduleBuffer(audioBuffer)
    }
    player.pan = 0.8
    player.play()
} catch {
    print("Error: \(error).")
}
Eh? Swift 3 version?
I am a Swift noob at the moment, so all those errors look pretty scary right now.