Created
November 17, 2016 01:17
-
-
Save samsonjs/71e27c1f500725d3d0c48064af7c1fd3 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//
// SnippetVideoCompositionInstruction.swift
// OneSecondEveryday
//
// Created by Sami Samhuri on 2016-09-05.
// Copyright © 2016 1 Second Everyday. All rights reserved.
//
import Foundation
import AVFoundation
@objc
final class SnippetVideoCompositionInstruction: NSObject, AVVideoCompositionInstructionProtocol {

    // MARK: - Fixed protocol requirements

    /// Always true so the custom compositor is invoked for every frame.
    let enablePostProcessing: Bool = true
    /// Frames are never tweened between instructions.
    let containsTweening: Bool = false
    /// No passthrough track: every frame is composed.
    let passthroughTrackID: CMPersistentTrackID = kCMPersistentTrackID_Invalid

    // MARK: - Per-instruction state

    let timeRange: CMTimeRange
    let requiredSourceTrackIDs: [NSValue]?
    let videoTrackID: CMPersistentTrackID
    let targetSize: CGSize
    let transform: CGAffineTransform
    let overlayLayer: CALayer?

    /// Creates an instruction covering `timeRange` of `track`.
    ///
    /// - Note: When an overlay layer is supplied, its bounds must already
    ///   match `targetSize` (debug-asserted below).
    init(track: AVAssetTrack, timeRange: CMTimeRange, overlayLayer: CALayer?, transform: CGAffineTransform, targetSize: CGSize) {
        assert(overlayLayer.map { $0.bounds.size == targetSize } ?? true)
        let trackID = track.trackID
        self.videoTrackID = trackID
        self.requiredSourceTrackIDs = [NSNumber(value: trackID)]
        self.timeRange = timeRange
        self.transform = transform
        self.targetSize = targetSize
        self.overlayLayer = overlayLayer
        super.init()
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
//
// SnippetVideoCompositor.swift
// OneSecondEveryday
//
// Created by Sami Samhuri on 2016-09-05.
// Copyright © 2016 1 Second Everyday. All rights reserved.
//
import Foundation
import Dispatch
import AVFoundation
import CoreImage
/// Errors thrown by `SnippetVideoCompositor` while rendering a composition request.
enum SnippetVideoCompositingError: Error {
    /// The request's instruction was not a `SnippetVideoCompositionInstruction`.
    case invalidRequest
    /// Neither a source frame nor a blank pixel buffer could be obtained.
    case sourceFrameBuffer
    /// The overlay layer could not be rasterized or composited.
    case overlayTextLayer
}
@objc
final class SnippetVideoCompositor: NSObject, AVVideoCompositing {

    /// Serial queue on which all composition requests are rendered.
    private let queue = DispatchQueue(label: "life.1se.snippet-video-compositor.render", qos: .default)

    private var renderContext: AVVideoCompositionRenderContext = AVVideoCompositionRenderContext()

    /// Set from the host thread when AVFoundation cancels outstanding requests;
    /// read on `queue` by `startRequest(_:)`. The reset is enqueued behind any
    /// already-queued work items so they all observe the cancellation.
    private var cancelled: Bool = false

    // NOTE(review): `EAGLContext(api:)` is failable; this relies on the ES2
    // fallback always succeeding (Swift 3-era IUO bridging). Confirm this still
    // compiles on current SDKs, or make the property optional.
    private let eaglContext: EAGLContext = EAGLContext(api: .openGLES3) ?? EAGLContext(api: .openGLES2)

    private lazy var ciContext: CIContext! = { CIContext(eaglContext: self.eaglContext) }()

    /// The overlay is rasterized once and reused for every frame.
    private var cachedOverlaySnapshot: CGImage?

    private let colorSpace = CGColorSpaceCreateDeviceRGB()

    var supportsWideColorSourceFrames: Bool {
        return false
    }

    private static let pixelFormat = kCVPixelFormatType_32BGRA

    let sourcePixelBufferAttributes: [String : Any]? = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: SnippetVideoCompositor.pixelFormat),
        kCVPixelBufferOpenGLESCompatibilityKey as String: NSNumber(value: true),
    ]

    let requiredPixelBufferAttributesForRenderContext: [String : Any] = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: SnippetVideoCompositor.pixelFormat),
        kCVPixelBufferOpenGLESCompatibilityKey as String: NSNumber(value: true),
    ]

    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        renderContext = newRenderContext
    }

    /// Renders the request asynchronously on `queue`, finishing it with either
    /// a composed frame, a cancellation, or an error.
    func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
        queue.async {
            guard !self.cancelled else {
                request.finishCancelledRequest()
                return
            }
            do {
                let renderedBuffer = try self.renderFrame(forRequest: request)
                request.finish(withComposedVideoFrame: renderedBuffer)
            }
            catch {
                request.finish(with: error)
            }
        }
    }

    func cancelAllPendingVideoCompositionRequests() {
        // Flip the flag immediately so work items already enqueued bail out,
        // then clear it from the back of the queue so future requests render
        // normally. (The original passed `.barrier`, which is a no-op on a
        // serial queue, so it has been dropped.)
        cancelled = true
        queue.async {
            self.cancelled = false
        }
    }

    /// Rasterizes `layer` into a CGImage, caching the result for reuse on
    /// subsequent frames.
    ///
    /// - Throws: `SnippetVideoCompositingError.overlayTextLayer` when a bitmap
    ///   context or image cannot be created. (The original threw a bare
    ///   `NSError()` here, losing the typed error the file defines.)
    private func overlaySnapshot(layer: CALayer) throws -> CGImage {
        if let cachedSnapshot = cachedOverlaySnapshot {
            return cachedSnapshot
        }
        // Core Animation's coordinate system is flipped relative to Core Graphics.
        layer.isGeometryFlipped = true
        let size = layer.bounds.size
        let w = Int(size.width)
        let h = Int(size.height)
        guard let context = CGContext(data: nil, width: w, height: h, bitsPerComponent: 8, bytesPerRow: 4 * w, space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
            throw SnippetVideoCompositingError.overlayTextLayer
        }
        layer.render(in: context)
        guard let snapshot = context.makeImage() else {
            throw SnippetVideoCompositingError.overlayTextLayer
        }
        cachedOverlaySnapshot = snapshot
        return snapshot
    }

    /// Produces the composed pixel buffer for one request: the source frame is
    /// transformed per the instruction, then the (optional) overlay is
    /// composited on top with `CISourceAtopCompositing`.
    private func renderFrame(forRequest request: AVAsynchronousVideoCompositionRequest) throws -> CVPixelBuffer {
        return try autoreleasepool {
            guard let instruction = request.videoCompositionInstruction as? SnippetVideoCompositionInstruction else {
                throw SnippetVideoCompositingError.invalidRequest
            }
            guard let videoFrameBuffer = request.sourceFrame(byTrackID: instruction.videoTrackID) else {
                // Be resilient in the face of errors: emit a blank frame rather
                // than failing the whole export. Only fail when even a blank
                // buffer cannot be allocated.
                if let blankBuffer = renderContext.newPixelBuffer() {
                    return blankBuffer
                }
                else {
                    throw SnippetVideoCompositingError.sourceFrameBuffer
                }
            }
            let frameImage = CIImage(cvPixelBuffer: videoFrameBuffer).applying(instruction.transform)

            // BUG FIX: `instruction.overlayLayer` is optional by design (its
            // init asserts size only when a layer is present), but the original
            // threw `.overlayTextLayer` whenever it was nil, failing every
            // overlay-free snippet. Render the bare transformed frame instead.
            // Also propagate the real error from `overlaySnapshot` with `try`
            // rather than masking it via `try?` + a generic throw.
            let outputImage: CIImage
            if let layer = instruction.overlayLayer {
                let overlayImage = CIImage(cgImage: try overlaySnapshot(layer: layer))
                guard let composeFilter = CIFilter(name: "CISourceAtopCompositing") else {
                    throw SnippetVideoCompositingError.overlayTextLayer
                }
                composeFilter.setValue(frameImage, forKey: kCIInputBackgroundImageKey)
                composeFilter.setValue(overlayImage, forKey: kCIInputImageKey)
                guard let composed = composeFilter.outputImage else {
                    throw SnippetVideoCompositingError.overlayTextLayer
                }
                outputImage = composed
            }
            else {
                outputImage = frameImage
            }
            guard let renderedBuffer = renderContext.newPixelBuffer() else {
                // A missing output buffer is a buffer-allocation failure, not an
                // overlay failure, so throw the matching case.
                throw SnippetVideoCompositingError.sourceFrameBuffer
            }
            ciContext.render(outputImage, to: renderedBuffer, bounds: outputImage.extent, colorSpace: self.colorSpace)
            return renderedBuffer
        }
    }
}
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.