AVCaptureAudioDataOutput Demo
import UIKit
import AVKit
import AVFoundation
import AssetsLibrary
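// Lightweight stand-ins for Objective-C's @synchronized, used below to guard the
// recording state machine from the session, data-output, and recorder callback queues.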
func synchronized(_ object: AnyObject, block: () -> Void) {
    objc_sync_enter(object)
    block()
    objc_sync_exit(object)
}

func synchronized<T>(_ object: AnyObject, block: () -> T) -> T {
    objc_sync_enter(object)
    let result: T = block()
    objc_sync_exit(object)
    return result
}
class CaptureViewController: UIViewController {

    let session: AVCaptureSession = AVCaptureSession()
    var capture: Capture?

    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let preview = AVCaptureVideoPreviewLayer(session: self.session)
        preview?.videoGravity = AVLayerVideoGravityResizeAspectFill
        return preview!
    }()

    override func viewWillLayoutSubviews() {
        super.viewWillLayoutSubviews()
        previewLayer.frame = view.bounds
    }

    override func viewDidLoad() {
        super.viewDidLoad()

        view.layer.addSublayer(previewLayer)

        capture = Capture(session: session)
        capture?.startRunning()

        let tap = UITapGestureRecognizer(target: self, action: #selector(tapped))
        view.addGestureRecognizer(tap)
    }
    // @objc so the method stays visible to the tap gesture's #selector.
    @objc func tapped() {
        if !(capture?.isRecording ?? true) {
            print("Start Recording")
            capture?.startRecording()
        } else {
            print("Stop Recording")
            capture?.stopRecording()
        }
    }
}
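// Note: on iOS 10 and later the app's Info.plist must contain NSCameraUsageDescription and
// NSMicrophoneUsageDescription entries, otherwise the system terminates the app the first
// time it accesses the camera or microphone.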
// internal state machine
private enum RosyWriterRecordingStatus: Int {
    case idle = 0
    case startingRecording
    case recording
    case stoppingRecording
}
class Capture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate, MovieRecorderDelegate {

    var _captureSession: AVCaptureSession

    private var _videoDevice: AVCaptureDevice?
    private var _audioConnection: AVCaptureConnection?
    private var _videoConnection: AVCaptureConnection?

    private var _sessionQueue: DispatchQueue
    private var _videoDataOutputQueue: DispatchQueue

    private var _videoCompressionSettings: [String: AnyObject] = [:]
    private var _audioCompressionSettings: [String: AnyObject] = [:]

    private var _videoBufferOrientation: AVCaptureVideoOrientation = .portrait

    private var _running: Bool = false
    private var _recordingStatus: RosyWriterRecordingStatus = .idle
    private var _recorder: MovieRecorder?
    private var _recordingURL: URL?

    var /*atomic*/ recordingOrientation: AVCaptureVideoOrientation = .portrait

    private var outputVideoFormatDescription: CMFormatDescription?
    private var outputAudioFormatDescription: CMFormatDescription?

    private var _previousSecondTimestamps: [CMTime] = []
    init(session: AVCaptureSession) {
        self._captureSession = session

        _sessionQueue = DispatchQueue(label: "com.apple.sample.capturepipeline.session", attributes: [])

        // In a multi-threaded producer-consumer system it's generally a good idea to make sure that
        // producers do not get starved of CPU time by their consumers. In this app we deliver
        // VideoDataOutput frames on a high-priority queue, and downstream consumers use
        // default-priority queues. Audio uses a default-priority queue because we aren't monitoring
        // it live and just want to get it into the movie. AudioDataOutput can tolerate more latency
        // than VideoDataOutput, as its buffers aren't allocated out of a fixed-size pool.
        let highQueue = DispatchQueue.global(qos: .userInteractive)
        // Passing empty `attributes` makes the queue serial (see https://bugs.swift.org/browse/SR-1859).
        _videoDataOutputQueue = DispatchQueue(label: "com.apple.sample.capturepipeline.video", attributes: [], target: highQueue)

        super.init()
    }
    private func setupCaptureSession(session: AVCaptureSession) {
        let audioDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
        let audioIn = try! AVCaptureDeviceInput(device: audioDevice)
        if session.canAddInput(audioIn) {
            session.addInput(audioIn)
        }

        let audioOut = AVCaptureAudioDataOutput()
        // Put audio on its own queue to ensure that our video processing doesn't cause us to drop audio.
        let audioCaptureQueue = DispatchQueue(label: "com.apple.sample.capturepipeline.audio", attributes: [])
        audioOut.setSampleBufferDelegate(self, queue: audioCaptureQueue)
        if session.canAddOutput(audioOut) {
            session.addOutput(audioOut)
        }
        _audioConnection = audioOut.connection(withMediaType: AVMediaTypeAudio)

        /* Video */
        guard let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo) else {
            fatalError("AVCaptureDevice of type AVMediaTypeVideo unavailable!")
        }
        do {
            let videoIn = try AVCaptureDeviceInput(device: videoDevice)
            if session.canAddInput(videoIn) {
                session.addInput(videoIn)
            }
            _videoDevice = videoDevice
        } catch {
            NSLog("Could not create video device input: \(error)")
        }
        let videoOut = AVCaptureVideoDataOutput()
        videoOut.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange as UInt32)]
        videoOut.setSampleBufferDelegate(self, queue: _videoDataOutputQueue)

        // RosyWriter records videos and we prefer not to have any dropped frames in the recording.
        // By setting alwaysDiscardsLateVideoFrames to false we ensure that minor fluctuations in
        // system load or in our processing time for a given frame won't cause frame drops. We do,
        // however, need to ensure that on average we can process frames in real time. If we were
        // doing preview only we would probably want to set alwaysDiscardsLateVideoFrames to true.
        videoOut.alwaysDiscardsLateVideoFrames = false

        if session.canAddOutput(videoOut) {
            session.addOutput(videoOut)
        }
        _videoConnection = videoOut.connection(withMediaType: AVMediaTypeVideo)

        var frameRate: Int32
        var sessionPreset = AVCaptureSessionPresetHigh
        var frameDuration = kCMTimeInvalid
        // For single-core systems like the iPhone 4 and iPod touch (4th generation) we use a lower
        // resolution and frame rate to maintain real-time performance.
        if ProcessInfo.processInfo.processorCount == 1 {
            if session.canSetSessionPreset(AVCaptureSessionPreset640x480) {
                sessionPreset = AVCaptureSessionPreset640x480
            }
            frameRate = 15
        } else {
            frameRate = 30
        }
        session.sessionPreset = sessionPreset

        frameDuration = CMTimeMake(1, frameRate)

        do {
            try videoDevice.lockForConfiguration()
            videoDevice.activeVideoMaxFrameDuration = frameDuration
            videoDevice.activeVideoMinFrameDuration = frameDuration
            videoDevice.unlockForConfiguration()
        } catch {
            NSLog("videoDevice lockForConfiguration returned error \(error)")
        }
        // Get the recommended compression settings after configuring the session/device.
        _audioCompressionSettings = audioOut.recommendedAudioSettingsForAssetWriter(withOutputFileType: AVFileTypeQuickTimeMovie) as! [String: AnyObject]
        _videoCompressionSettings = videoOut.recommendedVideoSettingsForAssetWriter(withOutputFileType: AVFileTypeQuickTimeMovie) as! [String: AnyObject]
        // Lowering AVVideoAverageBitRateKey reduces file size at the cost of quality; capping the
        // session preset at 720p instead of 1080p also helps.
        print(_videoCompressionSettings)
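        // A minimal sketch of overriding the recommended bit rate (not part of the original gist;
        // the 2_000_000 bps figure is an arbitrary example value). AVVideoCompressionPropertiesKey
        // and AVVideoAverageBitRateKey are the standard AVFoundation keys found in the dictionary
        // returned by recommendedVideoSettingsForAssetWriter.
        if var compressionProperties = _videoCompressionSettings[AVVideoCompressionPropertiesKey] as? [String: AnyObject] {
            compressionProperties[AVVideoAverageBitRateKey] = NSNumber(value: 2_000_000) // ~2 Mbps
            _videoCompressionSettings[AVVideoCompressionPropertiesKey] = compressionProperties as AnyObject
        }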
        _videoBufferOrientation = _videoConnection!.videoOrientation
    }
    var isRecording: Bool {
        return self._recordingStatus != .idle
    }

    func startRunning() {
        _sessionQueue.sync {
            self.setupCaptureSession(session: _captureSession)

            _captureSession.startRunning()
            self._running = true
        }
    }
    func startRecording() {
        synchronized(self) {
            if _recordingStatus != .idle {
                fatalError("Already recording")
            }

            self.transitionToRecordingStatus(.startingRecording, error: nil)
        }

        // Guarantee ordering of callbacks with a serial queue.
        let callbackQueue = DispatchQueue(label: "com.apple.sample.capturepipeline.recordercallback", attributes: [])

        if _recordingURL == nil {
            _recordingURL = URL(fileURLWithPath: NSString.path(withComponents: [NSTemporaryDirectory(), "\(Date()).MOV"]) as String)
        }
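        // The recording URL is created only once and reused for subsequent takes; the temporary
        // file is deleted after each recording has been saved to the photo library.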
        let recorder = MovieRecorder(url: _recordingURL!, delegate: self, callbackQueue: callbackQueue)

        recorder.addAudioTrackWithSourceFormatDescription(self.outputAudioFormatDescription!, settings: _audioCompressionSettings)

        // Front-camera recording shouldn't be mirrored.
        let videoTransform = self.transformFromVideoBufferOrientationToOrientation(self.recordingOrientation, withAutoMirroring: false)
        recorder.addVideoTrackWithSourceFormatDescription(self.outputVideoFormatDescription!, transform: videoTransform, settings: _videoCompressionSettings)

        _recorder = recorder

        // Asynchronous; the recorder calls us back with recorderDidFinishPreparing: or recorder:didFailWithError: when done.
        recorder.prepareToRecord()
    }
    func stopRunning() {
        _sessionQueue.sync {
            self._running = false

            // captureSessionDidStopRunning would stop recording if necessary as well, but we do it
            // here so that the last video and audio samples are better aligned.
            self.stopRecording() // Does nothing if we aren't currently recording.

            self._captureSession.stopRunning()
        }
    }

    func stopRecording() {
        let returnFlag: Bool = synchronized(self) {
            if _recordingStatus != .recording {
                return true
            }

            self.transitionToRecordingStatus(.stoppingRecording, error: nil)
            return false
        }
        if returnFlag { return }

        // Asynchronous; the recorder calls us back with recorderDidFinishRecording: or recorder:didFailWithError: when done.
        _recorder?.finishRecording()
    }
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)
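        // The first sample buffer seen on each connection supplies the format description that the
        // MovieRecorder uses to configure its asset-writer inputs in startRecording().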
        if connection === _videoConnection {
            if self.outputVideoFormatDescription == nil {
                self.outputVideoFormatDescription = formatDescription
            } else {
                // Check the recording status under the same lock used for state transitions.
                synchronized(self) {
                    if _recordingStatus == .recording {
                        self._recorder?.appendVideoSampleBuffer(sampleBuffer)
                    }
                }
            }
        } else if connection === _audioConnection {
            self.outputAudioFormatDescription = formatDescription

            synchronized(self) {
                if _recordingStatus == .recording {
                    self._recorder?.appendAudioSampleBuffer(sampleBuffer)
                }
            }
        }
    }
    // MARK: MovieRecorder Delegate

    func movieRecorderDidFinishPreparing(_ recorder: MovieRecorder) {
        synchronized(self) {
            if _recordingStatus != .startingRecording {
                fatalError("Expected to be in StartingRecording state")
            }
            self.transitionToRecordingStatus(.recording, error: nil)
        }
    }

    func movieRecorder(_ recorder: MovieRecorder, didFailWithError error: Error) {
        synchronized(self) {
            _recorder = nil
            self.transitionToRecordingStatus(.idle, error: error)
        }
    }

    func movieRecorderDidFinishRecording(_ recorder: MovieRecorder) {
        synchronized(self) {
            if _recordingStatus != .stoppingRecording {
                fatalError("Expected to be in StoppingRecording state")
            }
            // No state transition yet; we are still in the process of stopping.
            // We move to idle once the movie has been saved to the photo library.
        }

        _recorder = nil
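        // ALAssetsLibrary has been deprecated since iOS 9; a Photos-framework alternative is
        // sketched after this class.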
        let library = ALAssetsLibrary()
        library.writeVideoAtPath(toSavedPhotosAlbum: _recordingURL!) { assetURL, error in

            do {
                try FileManager.default.removeItem(at: self._recordingURL!)
            } catch {
                NSLog("Could not remove temporary movie file: \(error)")
            }

            synchronized(self) {
                if self._recordingStatus != .stoppingRecording {
                    fatalError("Expected to be in StoppingRecording state")
                }
                self.transitionToRecordingStatus(.idle, error: error)
            }
        }
    }
    // Call under @synchronized(self).
    private func transitionToRecordingStatus(_ newStatus: RosyWriterRecordingStatus, error: Error?) {
        _recordingStatus = newStatus
    }
    // Auto mirroring: the front camera is mirrored; the back camera isn't.
    // Only valid after startRunning has been called.
    func transformFromVideoBufferOrientationToOrientation(_ orientation: AVCaptureVideoOrientation, withAutoMirroring mirror: Bool) -> CGAffineTransform {
        var transform = CGAffineTransform.identity

        // Calculate offsets from an arbitrary reference orientation (portrait).
        let orientationAngleOffset = angleOffsetFromPortraitOrientationToOrientation(orientation)
        let videoOrientationAngleOffset = angleOffsetFromPortraitOrientationToOrientation(_videoBufferOrientation)

        // Find the difference in angle between the desired orientation and the video orientation.
        let angleOffset = orientationAngleOffset - videoOrientationAngleOffset
        transform = CGAffineTransform(rotationAngle: angleOffset)

        if _videoDevice!.position == .front {
            if mirror {
                transform = transform.scaledBy(x: -1, y: 1)
            } else {
                if UIInterfaceOrientationIsPortrait(UIInterfaceOrientation(rawValue: orientation.rawValue)!) {
                    transform = transform.rotated(by: CGFloat.pi)
                }
            }
        }

        return transform
    }
    private final func angleOffsetFromPortraitOrientationToOrientation(_ orientation: AVCaptureVideoOrientation) -> CGFloat {
        var angle: CGFloat = 0.0

        switch orientation {
        case .portrait:
            angle = 0.0
        case .portraitUpsideDown:
            angle = CGFloat.pi
        case .landscapeRight:
            angle = -CGFloat.pi / 2
        case .landscapeLeft:
            angle = CGFloat.pi / 2
        }

        return angle
    }
}
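// A minimal sketch of saving the finished movie with the Photos framework instead of the
// deprecated ALAssetsLibrary. This is not part of the original gist: the function name is
// hypothetical, and photo-library authorization (plus an NSPhotoLibraryUsageDescription entry
// in Info.plist) is assumed to have been handled elsewhere.
import Photos

func saveMovieToPhotoLibrary(_ movieURL: URL, completion: @escaping (Error?) -> Void) {
    PHPhotoLibrary.shared().performChanges({
        // Create a new video asset from the file the MovieRecorder finished writing.
        _ = PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieURL)
    }, completionHandler: { _, error in
        // Remove the temporary file whether or not the save succeeded, mirroring the
        // ALAssetsLibrary path above.
        try? FileManager.default.removeItem(at: movieURL)
        completion(error)
    })
}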