[iOS] UIImage array 2 Video
//http://stackoverflow.com/questions/3741323/how-do-i-export-uiimage-array-as-a-movie/3742212#3742212

import AVFoundation
import UIKit
import Photos

struct RenderSettings {

    var width: CGFloat = 1334
    var height: CGFloat = 750
    var fps: Int32 = 30   // 30 frames per second
    var avCodecKey = AVVideoCodecH264
    var videoFilename = "render"
    var videoFilenameExt = "mp4"

    var size: CGSize {
        return CGSize(width: width, height: height)
    }

    var outputURL: NSURL {
        // Use the CachesDirectory so the rendered video file sticks around as long as we need it to.
        // Using the CachesDirectory ensures the file won't be included in a backup of the app.
        let fileManager = NSFileManager.defaultManager()
        if let tmpDirURL = try? fileManager.URLForDirectory(.CachesDirectory, inDomain: .UserDomainMask, appropriateForURL: nil, create: true) {
            return tmpDirURL.URLByAppendingPathComponent(videoFilename).URLByAppendingPathExtension(videoFilenameExt)
        }
        fatalError("URLForDirectory() failed")
    }
}
/*
 The ImageAnimator class knows about your images and uses the VideoWriter class to perform the rendering.
 The idea is to keep the video content code separate from the low-level AVFoundation code. I also added
 saveToLibrary() here as a class function which gets called at the end of the chain to save the video to the Photo Library.
*/
class ImageAnimator {

    // Apple suggests a timescale of 600 because it's a multiple of standard video rates 24, 25, 30, 60 fps etc.
    static let kTimescale: Int32 = 600

    let settings: RenderSettings
    let videoWriter: VideoWriter
    var images: [UIImage]!

    var frameNum = 0
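
    // Note: on iOS 10 and later, writing to the photo library also requires a usage
    // description in Info.plist (NSPhotoLibraryUsageDescription, or the add-only
    // NSPhotoLibraryAddUsageDescription on iOS 11 and later).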
    class func saveToLibrary(videoURL: NSURL) {
        PHPhotoLibrary.requestAuthorization { status in
            guard status == .Authorized else { return }

            PHPhotoLibrary.sharedPhotoLibrary().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideoAtFileURL(videoURL)
            }) { success, error in
                if !success {
                    print("Could not save video to photo library:", error)
                }
            }
        }
    }

    class func removeFileAtURL(fileURL: NSURL) {
        do {
            try NSFileManager.defaultManager().removeItemAtPath(fileURL.path!)
        }
        catch _ as NSError {
            // Assume file doesn't exist.
        }
    }

    init(renderSettings: RenderSettings) {
        settings = renderSettings
        videoWriter = VideoWriter(renderSettings: settings)
        images = loadImages()
    }

    func render(completion: ()->Void) {

        // The VideoWriter will fail if a file exists at the URL, so clear it out first.
        ImageAnimator.removeFileAtURL(settings.outputURL)

        videoWriter.start()
        videoWriter.render(appendPixelBuffers) {
            ImageAnimator.saveToLibrary(self.settings.outputURL)
            completion()
        }
    }

    // Replace this logic with your own.
    func loadImages() -> [UIImage] {
        var images = [UIImage]()
        for index in 1...10 {
            let filename = "\(index).jpg"
            images.append(UIImage(named: filename)!)
        }
        return images
    }

    // This is the callback function for VideoWriter.render()
    func appendPixelBuffers(writer: VideoWriter) -> Bool {

        let frameDuration = CMTimeMake(Int64(ImageAnimator.kTimescale / settings.fps), ImageAnimator.kTimescale)

        while !images.isEmpty {

            if writer.isReadyForData == false {
                // Inform writer we have more buffers to write.
                return false
            }

            let image = images.removeFirst()
            let presentationTime = CMTimeMultiply(frameDuration, Int32(frameNum))

            let success = videoWriter.addImage(image, withPresentationTime: presentationTime)
            if success == false {
                fatalError("addImage() failed")
            }

            frameNum++
        }

        // Inform writer all buffers have been written.
        return true
    }
}
/*
 The VideoWriter class does all the AVFoundation heavy lifting. It's mostly a wrapper around
 AVAssetWriter and AVAssetWriterInput. It also contains fancy code, not written by me, that
 knows how to translate an image into a CVPixelBuffer.
*/
class VideoWriter {

    let renderSettings: RenderSettings

    var videoWriter: AVAssetWriter!
    var videoWriterInput: AVAssetWriterInput!
    var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!

    var isReadyForData: Bool {
        return videoWriterInput?.readyForMoreMediaData ?? false
    }

    class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {

        var pixelBufferOut: CVPixelBuffer?

        let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
        if status != kCVReturnSuccess {
            fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
        }

        let pixelBuffer = pixelBufferOut!

        CVPixelBufferLockBaseAddress(pixelBuffer, 0)

        let data = CVPixelBufferGetBaseAddress(pixelBuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height),
            8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

        CGContextClearRect(context, CGRectMake(0, 0, size.width, size.height))

        let horizontalRatio = size.width / image.size.width
        let verticalRatio = size.height / image.size.height
        //aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
        let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit

        let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)

        let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
        let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0

        CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), image.CGImage)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)

        return pixelBuffer
    }

    init(renderSettings: RenderSettings) {
        self.renderSettings = renderSettings
    }

    func start() {

        let avOutputSettings: [String: AnyObject] = [
            AVVideoCodecKey: renderSettings.avCodecKey,
            AVVideoWidthKey: NSNumber(float: Float(renderSettings.width)),
            AVVideoHeightKey: NSNumber(float: Float(renderSettings.height))
        ]

        func createPixelBufferAdaptor() {
            let sourcePixelBufferAttributesDictionary = [
                kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
                kCVPixelBufferWidthKey as String: NSNumber(float: Float(renderSettings.width)),
                kCVPixelBufferHeightKey as String: NSNumber(float: Float(renderSettings.height))
            ]
            pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
                sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
        }

        func createAssetWriter(outputURL: NSURL) -> AVAssetWriter {
            guard let assetWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeMPEG4) else {
                fatalError("AVAssetWriter() failed")
            }
            guard assetWriter.canApplyOutputSettings(avOutputSettings, forMediaType: AVMediaTypeVideo) else {
                fatalError("canApplyOutputSettings() failed")
            }
            return assetWriter
        }

        videoWriter = createAssetWriter(renderSettings.outputURL)
        videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: avOutputSettings)

        if videoWriter.canAddInput(videoWriterInput) {
            videoWriter.addInput(videoWriterInput)
        }
        else {
            fatalError("canAddInput() returned false")
        }

        // The pixel buffer adaptor must be created before we start writing.
        createPixelBufferAdaptor()

        if videoWriter.startWriting() == false {
            fatalError("startWriting() failed")
        }

        videoWriter.startSessionAtSourceTime(kCMTimeZero)

        precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
    }

    func render(appendPixelBuffers: (VideoWriter)->Bool, completion: ()->Void) {

        precondition(videoWriter != nil, "Call start() to initialize the writer")

        let queue = dispatch_queue_create("mediaInputQueue", nil)
        videoWriterInput.requestMediaDataWhenReadyOnQueue(queue) {
            let isFinished = appendPixelBuffers(self)
            if isFinished {
                self.videoWriterInput.markAsFinished()
                self.videoWriter.finishWritingWithCompletionHandler() {
                    dispatch_async(dispatch_get_main_queue()) {
                        completion()
                    }
                }
            }
            else {
                // Fall through. The closure will be called again when the writer is ready.
            }
        }
    }

    func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {

        precondition(pixelBufferAdaptor != nil, "Call start() to initialize the writer")

        let pixelBuffer = VideoWriter.pixelBufferFromImage(image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
        return pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
    }
}
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings)
imageAnimator.render() {
    print("yes")
}
The code wasn't made by me; it comes from a Stack Overflow post, so maybe you could post the question there. In any case, I think you'd need to refactor the images.removeFirst() at line 111 to instead load the image from wherever it is stored.
Thanks for the code. What if you have so many images that they can't fit into a single array due to memory issues? For example, I have 2000 high-resolution images and can't fit them all into an array.
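
A minimal sketch of the refactor suggested above, in the same Swift 2 style as the rest of the gist: ImageAnimator keeps only file URLs in memory and decodes each UIImage right before it is written, so only one frame is held at a time. The directory, file names, and loadImageURLs() helper below are placeholders; replace them with whatever matches how your frames are actually stored.

    // Inside ImageAnimator, replace `var images: [UIImage]!` with a list of URLs.
    var imageURLs = [NSURL]()

    // Hypothetical loader: assumes frames are stored as 1.jpg ... 2000.jpg in Documents.
    func loadImageURLs() -> [NSURL] {
        let documentsURL = try! NSFileManager.defaultManager()
            .URLForDirectory(.DocumentDirectory, inDomain: .UserDomainMask, appropriateForURL: nil, create: false)
        return (1...2000).map { documentsURL.URLByAppendingPathComponent("\($0).jpg") }
    }

    // Same shape as the original appendPixelBuffers(), but each image is decoded
    // on demand and released at the end of its loop iteration.
    func appendPixelBuffers(writer: VideoWriter) -> Bool {
        let frameDuration = CMTimeMake(Int64(ImageAnimator.kTimescale / settings.fps), ImageAnimator.kTimescale)

        while !imageURLs.isEmpty {
            if writer.isReadyForData == false {
                // Inform writer we have more buffers to write.
                return false
            }

            let url = imageURLs.removeFirst()
            guard let path = url.path, image = UIImage(contentsOfFile: path) else {
                fatalError("Could not load image at \(url)")
            }

            let presentationTime = CMTimeMultiply(frameDuration, Int32(frameNum))
            if videoWriter.addImage(image, withPresentationTime: presentationTime) == false {
                fatalError("addImage() failed")
            }
            frameNum += 1
        }

        // Inform writer all buffers have been written.
        return true
    }

UIImage(contentsOfFile:) bypasses the caching that UIImage(named:) does, and wrapping the body of each loop iteration in an autoreleasepool block can further cap peak memory if the decoder autoreleases intermediate buffers.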