create movie from [UIImage], Swift - iOS

I've found "export [UIImage] as movie", but it's all in Objective-C and I can't figure it out for Swift.
I need to create a video from [UIImage].
I'm working from Zoul's answer in the linked question, part 1) Wire the writer.
So far I have:
let paths = NSFileManager.defaultManager().URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
let documentsURL = paths[0] as! NSURL
let videoWriter:AVAssetWriter = AVAssetWriter(URL: documentsURL, fileType: AVFileTypeQuickTimeMovie, error: nil)
var videoSettings: NSDictionary = NSDictionary(
I can't figure out the correct Swift version of his:
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
AVVideoCodecH264, AVVideoCodecKey,
[NSNumber numberWithInt:640], AVVideoWidthKey,
[NSNumber numberWithInt:480], AVVideoHeightKey,
nil];

I converted the Objective-C code posted by @Cameron E to Swift 3, and it's working. The answer's link: @Cameron E's CEMovieMaker.
Following is the CXEImagesToVideo class:
//
// CXEImagesToVideo.swift
// VideoAPPTest
//
// Created by Wulei on 16/12/14.
// Copyright © 2016 wulei. All rights reserved.
//
import Foundation
import AVFoundation
import UIKit
typealias CXEMovieMakerCompletion = (URL) -> Void
typealias CXEMovieMakerUIImageExtractor = (AnyObject) -> UIImage?
public class CXEImagesToVideo: NSObject{
var assetWriter:AVAssetWriter!
var writeInput:AVAssetWriterInput!
var bufferAdapter:AVAssetWriterInputPixelBufferAdaptor!
var videoSettings:[String : Any]!
var frameTime:CMTime!
var fileURL:URL!
var completionBlock: CXEMovieMakerCompletion?
var movieMakerUIImageExtractor:CXEMovieMakerUIImageExtractor?
public class func videoSettings(codec:String, width:Int, height:Int) -> [String: Any]{
if width % 16 != 0 {
print("warning: video settings width must be divisible by 16")
}
let videoSettings:[String: Any] = [AVVideoCodecKey: codec, // use the codec parameter instead of ignoring it
AVVideoWidthKey: width,
AVVideoHeightKey: height]
return videoSettings
}
public init(videoSettings: [String: Any]) {
super.init()
let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let tempPath = paths[0] + "/exportvideo.mp4"
if(FileManager.default.fileExists(atPath: tempPath)){
guard (try? FileManager.default.removeItem(atPath: tempPath)) != nil else {
print("remove path failed")
return
}
}
self.fileURL = URL(fileURLWithPath: tempPath)
// Note: the container is QuickTime even though the file extension is .mp4.
self.assetWriter = try! AVAssetWriter(url: self.fileURL, fileType: AVFileTypeQuickTimeMovie)
self.videoSettings = videoSettings
self.writeInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
assert(self.assetWriter.canAdd(self.writeInput), "add failed")
self.assetWriter.add(self.writeInput)
let bufferAttributes:[String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32ARGB)]
self.bufferAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: self.writeInput, sourcePixelBufferAttributes: bufferAttributes)
self.frameTime = CMTimeMake(1, 10) // 1/10 second per frame, i.e. 10 fps
}
func createMovieFrom(urls: [URL], withCompletion: @escaping CXEMovieMakerCompletion){
self.createMovieFromSource(images: urls as [AnyObject], extractor:{(inputObject:AnyObject) ->UIImage? in
return UIImage(data: try! Data(contentsOf: inputObject as! URL))}, withCompletion: withCompletion)
}
func createMovieFrom(images: [UIImage], withCompletion: @escaping CXEMovieMakerCompletion){
self.createMovieFromSource(images: images, extractor: {(inputObject:AnyObject) -> UIImage? in
return inputObject as? UIImage}, withCompletion: withCompletion)
}
func createMovieFromSource(images: [AnyObject], extractor: @escaping CXEMovieMakerUIImageExtractor, withCompletion: @escaping CXEMovieMakerCompletion){
self.completionBlock = withCompletion
self.assetWriter.startWriting()
self.assetWriter.startSession(atSourceTime: kCMTimeZero)
let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")
var i = 0
let frameNumber = images.count
self.writeInput.requestMediaDataWhenReady(on: mediaInputQueue){
while(true){
if(i >= frameNumber){
break
}
if (self.writeInput.isReadyForMoreMediaData){
var sampleBuffer:CVPixelBuffer?
autoreleasepool{
// Skip frames whose image cannot be extracted instead of force-unwrapping nil.
if let img = extractor(images[i]){
sampleBuffer = self.newPixelBufferFrom(cgImage: img.cgImage!)
}else{
i += 1
print("Warning: could not extract one of the frames")
}
}
if (sampleBuffer != nil){
if(i == 0){
self.bufferAdapter.append(sampleBuffer!, withPresentationTime: kCMTimeZero)
}else{
let value = i - 1
let lastTime = CMTimeMake(Int64(value), self.frameTime.timescale)
let presentTime = CMTimeAdd(lastTime, self.frameTime)
self.bufferAdapter.append(sampleBuffer!, withPresentationTime: presentTime)
}
i = i + 1
}
}
}
self.writeInput.markAsFinished()
self.assetWriter.finishWriting {
DispatchQueue.main.sync {
self.completionBlock!(self.fileURL)
}
}
}
}
func newPixelBufferFrom(cgImage:CGImage) -> CVPixelBuffer?{
let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
var pxbuffer:CVPixelBuffer?
let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int
let status = CVPixelBufferCreate(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, options as CFDictionary?, &pxbuffer)
assert(status == kCVReturnSuccess && pxbuffer != nil, "newPixelBuffer failed")
CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: pxdata, width: frameWidth, height: frameHeight, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
assert(context != nil, "context is nil")
context!.concatenate(CGAffineTransform.identity)
context!.draw(cgImage, in: CGRect(x: 0, y: 0, width: cgImage.width, height: cgImage.height))
CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
return pxbuffer
}
}
Usage:
var uiImages = [UIImage]()
/** add image to uiImages */
let settings = CXEImagesToVideo.videoSettings(codec: AVVideoCodecH264, width: (uiImages[0].cgImage?.width)!, height: (uiImages[0].cgImage?.height)!)
let movieMaker = CXEImagesToVideo(videoSettings: settings)
movieMaker.createMovieFrom(images: uiImages){ (fileURL:URL) in
let video = AVAsset(url: fileURL)
let playerItem = AVPlayerItem(asset: video)
let avPlayer = AVPlayer(playerItem: playerItem)
let playerLayer = AVPlayerLayer(player: avPlayer)
playerLayer.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.width * 3.0 / 4.0)
self.view.layer.addSublayer(playerLayer)
avPlayer.play()
}
Export or play the video with fileURL.
There are async and sync variants in this Gist: https://gist.github.com/Willib/b97b08d8d877ca5d875ff14abb4c3f1a
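The class above also exposes a URL-based variant. A minimal usage sketch, assuming the URLs point at readable image files on disk (the file names below are placeholders, and note the extractor force-unwraps the file data, so missing files will crash):
let imageURLs: [URL] = (0..<10).map {
    FileManager.default.temporaryDirectory.appendingPathComponent("frame\($0).jpg") // placeholder files
}
let urlMovieMaker = CXEImagesToVideo(videoSettings: settings) // same settings as above
urlMovieMaker.createMovieFrom(urls: imageURLs) { fileURL in
    print("movie written to \(fileURL)")
}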

Constructing a Dictionary literal is straightforward:
import AVFoundation
let videoSettings: [String: Any] = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: 640,
AVVideoHeightKey: 480
]
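To continue with part 1 of Zoul's answer, that dictionary plugs straight into an AVAssetWriterInput. A minimal sketch under the same SDK names (the output file name is illustrative; note the writer URL must point at a file, not at the documents directory itself as in the question's code):
let movieURL = FileManager.default
    .urls(for: .documentDirectory, in: .userDomainMask)[0]
    .appendingPathComponent("movie.mov") // illustrative file name
let writer = try! AVAssetWriter(url: movieURL, fileType: AVFileTypeQuickTimeMovie)
let writerInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
if writer.canAdd(writerInput) {
    writer.add(writerInput)
}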
As for everything else, I would encourage you to read through Apple's The Swift Programming Language to establish fundamentals first, rather than relying on SO or tutorials that happen to cover what you want to do. "Teach a man to fish", as they say.

**For Swift 4.2**
Generate a video from images and manually save it. The images come from the previous controller.
// VideoMakerViewController.swift
// VideoMaker
// Created by ISHA PATEL on 05/10/18.
// Copyright © 2018 Isha Patel. All rights reserved.
import AVFoundation
import UIKit
import Photos
import AVKit
var tempurl=""
class VideoMakerViewController: UIViewController {
var images:[UIImage]=[]
@IBOutlet weak var videoview: UIView!
override func viewDidLoad() {
super.viewDidLoad()
DispatchQueue.main.async {
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings,imagearr: self.images)
imageAnimator.render() {
self.displayVideo()
}
}
}
func displayVideo()
{
let u: String = tempurl
let player = AVPlayer(url: URL(fileURLWithPath: u))
let playerController = AVPlayerViewController()
playerController.player = player
self.addChild(playerController)
videoview.addSubview(playerController.view)
playerController.view.frame.size=(videoview.frame.size)
playerController.view.contentMode = .scaleAspectFit
playerController.view.backgroundColor=UIColor.clear
videoview.backgroundColor=UIColor.clear
player.play()
}
@IBAction func save(_ sender: UIBarButtonItem) {
PHPhotoLibrary.requestAuthorization { status in
guard status == .authorized else { return }
let u = tempurl
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: u))
}) { success, error in
if !success {
print("Could not save video to photo library:", error!)
}
}
}
}
}
struct RenderSettings {
var width: CGFloat = 1500
var height: CGFloat = 844
var fps: Int32 = 2 // 2 frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "renderExportVideo"
var videoFilenameExt = "mp4"
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: NSURL {
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt) as NSURL
}
fatalError("URLForDirectory() failed")
}
}
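Since RenderSettings is a plain struct with default values, individual fields can be overridden before handing it to the ImageAnimator below. A quick sketch (the 720p values and myImages are illustrative):
var customSettings = RenderSettings()
customSettings.width = 1280 // illustrative
customSettings.height = 720 // illustrative
customSettings.fps = 30
let animator = ImageAnimator(renderSettings: customSettings, imagearr: myImages)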
class VideoWriter {
let renderSettings: RenderSettings
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var isReadyForData: Bool {
return videoWriterInput?.isReadyForMoreMediaData ?? false
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
context!.concatenate(CGAffineTransform.identity)
context!.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
init(renderSettings: RenderSettings) {
self.renderSettings = renderSettings
}
func start() {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: NSURL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
videoWriter.startSession(atSourceTime: CMTime.zero)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: #escaping (VideoWriter)->Bool, completion: #escaping ()->Void) {
precondition(videoWriter != nil, "Call start() to initialize the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
completion()
}
}
}
else {
// Not finished yet; requestMediaDataWhenReady will invoke this block again.
}
}
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialize the writer")
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}
}
class ImageAnimator{
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var images: [UIImage]!
var frameNum = 0
class func removeFileAtURL(fileURL: NSURL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path!)
}
catch _ as NSError {
//
}
}
init(renderSettings: RenderSettings,imagearr: [UIImage]) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings)
images = imagearr
}
func render(completion: @escaping ()->Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start()
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
tempurl = self.settings.outputURL.path!
completion()
}
}
func appendPixelBuffers(writer: VideoWriter) -> Bool {
// e.g. with fps = 2 and kTimescale = 600: frameDuration = 300/600 = 0.5 s per frame
let frameDuration = CMTimeMake(value: Int64(ImageAnimator.kTimescale / settings.fps), timescale: ImageAnimator.kTimescale)
while !images.isEmpty {
if writer.isReadyForData == false {
return false
}
let image = images.removeFirst()
let presentationTime = CMTimeMultiply(frameDuration, multiplier: Int32(frameNum))
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
frameNum += 1
}
return true
}
}

For Swift 5
I've made some minor changes to this very good answer. Simply copy and paste, and don't forget to pass the framesArray argument to the buildVideoFromImageArray function.
func saveVideoToLibrary(videoURL: URL) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
}) { saved, error in
if let error = error {
print("Error saving video to librayr: \(error.localizedDescription)")
}
if saved {
print("Video save to library")
}
}
}
func buildVideoFromImageArray(framesArray:[UIImage]) {
var images = framesArray
let outputSize = CGSize(width:images[0].size.width, height: images[0].size.height)
let fileManager = FileManager.default
let urls = fileManager.urls(for: .cachesDirectory, in: .userDomainMask)
guard let cachesDirectory = urls.first else {
fatalError("cachesDirectory Error")
}
let videoOutputURL = cachesDirectory.appendingPathComponent("OutputVideo.mp4")
if FileManager.default.fileExists(atPath: videoOutputURL.path) {
do {
try FileManager.default.removeItem(atPath: videoOutputURL.path)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
guard let videoWriter = try? AVAssetWriter(outputURL: videoOutputURL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSession(atSourceTime: CMTime.zero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
let fps: Int32 = 30 // frames per second
let frameDuration = CMTimeMake(value: 1, timescale: fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!images.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = images.remove(at: 0)
let lastFrameTime = CMTimeMake(value: frameCount, timescale: fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(outputSize.width), height: Int(outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context?.clear(CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height))
let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, [])
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
frameCount += 1
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
//frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("Done saving")
self.saveVideoToLibrary(videoURL: videoOutputURL)
}
})
}
}
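A hypothetical call site; the asset names are placeholders, and the frames are assumed to share one size, since the function reads the size of the first image:
let frames = (1...10).compactMap { UIImage(named: "frame\($0)") } // placeholder assets
buildVideoFromImageArray(framesArray: frames)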

Related

SwiftUI Share Instagram Post

I'm trying to share a post to Instagram with SwiftUI, but it doesn't work. I had no problems while sharing a story, but the post screen doesn't open when sharing a post.
// plist
<key>LSApplicationQueriesSchemes</key>
<array>
<string>instagram</string>
<string>instagram-stories</string>
</array>
The below code worked for me.
Button {
if post.post_type != PostType.mp4.rawValue {
let imagedData = NSData(contentsOf: imageURL)!
InstagramManager.instance.postImage(image: UIImage(data: imagedData as Data)!) { success in
print("insgram shared success = \(success)")
}
} else {
let videoData = NSData(contentsOf: videoUrl!)
InstagramManager.instance.postVideoToInstagramFeed(videoData: videoData!)
}
} // end of Button action (label omitted in this snippet)
import SwiftUI
import Photos
final class InstagramManager : NSObject {
public static let instance = InstagramManager()
func postImage(image: UIImage, result:((Bool)->Void)? = nil) {
guard let instagramURL = NSURL(string: "instagram://app") else {
if let result = result {
result(false)
}
return
}
// let image = image.scaleImageWithAspectToWidth(640)
do {
try PHPhotoLibrary.shared().performChangesAndWait {
let request = PHAssetChangeRequest.creationRequestForAsset(from: image)
let assetID = request.placeholderForCreatedAsset?.localIdentifier ?? ""
let shareURL = "instagram://library?LocalIdentifier=" + assetID
if UIApplication.shared.canOpenURL(instagramURL as URL) {
if let urlForRedirect = NSURL(string: shareURL) {
UIApplication.shared.open(URL(string: "\(urlForRedirect)")!)
}
}
}
} catch {
if let result = result {
result(false)
}
}
}
func postVideoToInstagramFeed(videoData: NSData) {
getLibraryPermissionIfNecessary { granted in
guard granted else { return }
// Run the library work only after permission is granted; originally
// performChanges started unconditionally, outside this closure.
PHPhotoLibrary.shared().performChanges({
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0];
let filePath = "\(documentsPath)/\(Date().description).mp4"
videoData.write(toFile: filePath, atomically: true)
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: filePath))
},
completionHandler: { success, error in
if success {
let fetchOptions = PHFetchOptions()
fetchOptions.sortDescriptors = [NSSortDescriptor(key: "modificationDate", ascending: false)]
let fetchResult = PHAsset.fetchAssets(with: .video, options: fetchOptions)
if let lastAsset = fetchResult.firstObject {
let localIdentifier = lastAsset.localIdentifier
let urlFeed = "instagram://library?LocalIdentifier=" + localIdentifier
guard
let url = URL(string: urlFeed)
else {
print("Could not open url")
return
}
DispatchQueue.main.async {
if UIApplication.shared.canOpenURL(url) {
if #available(iOS 10.0, *) {
UIApplication.shared.open(url, options: [:], completionHandler: { (success) in
print("Sucess")
})
}
else {
UIApplication.shared.openURL(url)
print("Sucess")
}
}
else {
print("Instagram not found")
}
}
}
}
else if let error = error {
print(error.localizedDescription)
}
else {
print("Could not save the video")
}
})
}
}
func getLibraryPermissionIfNecessary(completionHandler: @escaping (Bool) -> Void) {
guard PHPhotoLibrary.authorizationStatus() != .authorized else {
completionHandler(true)
return
}
PHPhotoLibrary.requestAuthorization { status in
completionHandler(status == .authorized)
}
}
}
import UIKit
extension UIImage {
// MARK: - UIImage+Resize
func scaleImageWithAspectToWidth(_ toWidth:CGFloat) -> UIImage {
let oldWidth:CGFloat = size.width
let scaleFactor:CGFloat = toWidth / oldWidth
let newHeight = self.size.height * scaleFactor
let newWidth = oldWidth * scaleFactor
UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
}
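On iOS 10 and later, the same resize can also be written with UIGraphicsImageRenderer, which manages the bitmap context and avoids the force-unwrap. This is a sketch of an alternative, not part of the original answer:
extension UIImage {
    // Alternative resize using UIGraphicsImageRenderer (iOS 10+).
    func scaledWithAspect(toWidth width: CGFloat) -> UIImage {
        let scale = width / size.width
        let newSize = CGSize(width: width, height: size.height * scale)
        return UIGraphicsImageRenderer(size: newSize).image { _ in
            draw(in: CGRect(origin: .zero, size: newSize))
        }
    }
}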

iOS Swift memory issue during an images-to-video conversion using AVAssetWriter

I've already done a lot of searching and many experiments, but I haven't found a proper solution.
I'm trying to convert UIImages to video. I have an array of 250+ images and try to convert them to a video at 60 FPS.
I put the render code in an autoreleasepool and added autoreleasepool in some other places too, but it had no effect.
Code:
import AVFoundation
import UIKit
import Photos
import AVKit
var tempurl = ""
struct RenderSettings {
var width: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var height: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var fps: Int32 = 60 //frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "ImageToVideo"
var videoFilenameExt = "mp4"
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: URL {
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt) as URL
}
fatalError("URLForDirectory() failed")
}
}
class VideoWriter {
let renderSettings: RenderSettings
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var isReadyForData: Bool {
return videoWriterInput?.isReadyForMoreMediaData ?? false
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {
autoreleasepool {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
context!.concatenate(CGAffineTransform.identity)
context!.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
}
init(renderSettings: RenderSettings) {
self.renderSettings = renderSettings
}
func start() {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: URL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
videoWriter.startSession(atSourceTime: CMTime.zero)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: @escaping (VideoWriter)->Bool, completion: @escaping ()->Void) {
autoreleasepool {
precondition(videoWriter != nil, "Call start() to initialize the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
completion()
}
}
}
}
}
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
autoreleasepool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialize the writer")
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}
}
}
class ImageAnimator {
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var images: [UIImage]!
var frameNum = 0
class func removeFileAtURL(fileURL: URL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path)
}
catch _ as NSError {
//
}
}
init(renderSettings: RenderSettings,imagearr: [UIImage]) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings)
images = imagearr
}
func render(completion: @escaping ()->Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start()
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
let s: String = self.settings.outputURL.path
tempurl = s
completion()
}
}
func appendPixelBuffers(writer: VideoWriter) -> Bool {
let frameDuration = CMTimeMake(value: Int64(ImageAnimator.kTimescale / settings.fps), timescale: ImageAnimator.kTimescale)
while !images.isEmpty {
if writer.isReadyForData == false {
return false
}
let image = images.removeFirst()
let presentationTime = CMTimeMultiply(frameDuration, multiplier: Int32(frameNum))
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
frameNum += 1
}
return true
}
}
Memory usage (screenshot omitted):
I capture the images using this code:
@objc public class Recorder: NSObject {
public var view : UIView?
var displayLink : CADisplayLink?
var referenceDate : NSDate?
var imageArray = [UIImage]()
public func start() {
self.imageArray.removeAll()
if (view == nil) {
NSException(name: NSExceptionName(rawValue: "No view set"), reason: "You must set a view before calling start.", userInfo: nil).raise()
}else {
displayLink = CADisplayLink(target: self, selector: #selector(self.handleDisplayLink(displayLink:)))
displayLink!.add(to: RunLoop.main, forMode: RunLoop.Mode.common)
referenceDate = NSDate()
}
}
@objc func handleDisplayLink(displayLink : CADisplayLink) {
if (view != nil) {
createImageFromView(captureView: view!)
}
}
func createImageFromView(captureView : UIView) {
UIGraphicsBeginImageContextWithOptions(captureView.bounds.size, false, 0)
captureView.drawHierarchy(in: captureView.bounds, afterScreenUpdates: false)
let image = UIGraphicsGetImageFromCurrentImageContext();
if let img = image {
self.imageArray.append(img)
}
UIGraphicsEndImageContext();
}
public func stop(completion: @escaping (_ saveURL: String) -> Void) {
displayLink?.invalidate()
let seconds = referenceDate?.timeIntervalSinceNow
if (seconds != nil) {
print("Image Count : \(self.imageArray.count)")
DispatchQueue.main.async {
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings,imagearr: self.imageArray)
imageAnimator.render() {
let u: String = tempurl
completion(u)
//self.saveVideoInPhotos()
}
}
}
}
}
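For reference, a hypothetical call site for this Recorder (viewToCapture is a placeholder for whatever view is being recorded):
let recorder = Recorder()
recorder.view = viewToCapture // placeholder view
recorder.start()
// ... later, e.g. when the user taps stop:
recorder.stop { savedPath in
    print("video written to \(savedPath)")
}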
Thanks in advance.

Swift - Rich Notification Cuts Off Top and Bottom of Image

I'm using rich notifications to display an image from an https URL in a push notification. The image is full screen. The problem is that the rich notification displays the center of the image, which cuts off the top and bottom.
I tried to shrink the image before sending it, but it still gets cut off.
I tried to shrink the image when retrieving and displaying it, but it still gets cut off.
How can I get the entire image to fit inside the rich notification attachment?
override func didReceive(_ request: UNNotificationRequest, withContentHandler contentHandler: @escaping (UNNotificationContent) -> Void) {
self.contentHandler = contentHandler
bestAttemptContent = (request.content.mutableCopy() as? UNMutableNotificationContent)
func failEarly() {
contentHandler(request.content)
}
guard let bestAttemptContent = bestAttemptContent else { failEarly(); return }
guard let attachmentUrlString = request.content.userInfo["media-url"] as? String else { failEarly(); return }
guard let url = URL(string: attachmentUrlString) else { failEarly(); return }
URLSession.shared.downloadTask(with: url, completionHandler: { [weak self](tempLocation, response, error) -> Void in
if error != nil { return }
guard let location = tempLocation else { return }
guard let response = response else { return }
do {
let lastPathComponent = response.url?.lastPathComponent ?? ""
var attachmentID = UUID.init().uuidString + lastPathComponent
if response.suggestedFilename != nil {
attachmentID = UUID.init().uuidString + response.suggestedFilename!
}
let tempDict = NSTemporaryDirectory()
let tempFilePath = tempDict + attachmentID
try FileManager.default.moveItem(atPath: location.path, toPath: tempFilePath)
guard let image = UIImage(contentsOfFile: tempFilePath) else { return }
guard let resizedImage = NotificationService.resizeImage(image: image, newWidth: 200) else { return } // I went all the way down to 25 and it was just a blurry image
guard let imageData = resizedImage.jpegData(compressionQuality: 1.0) else { return }
let fileURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
.appendingPathComponent("pkm", isDirectory: false)
.appendingPathExtension("jpg")
try imageData.write(to: fileURL)
let attachment = try UNNotificationAttachment.init(identifier: "pkm.jpg", url: fileURL, options: nil)
bestAttemptContent.attachments.append(attachment)
}
catch { return }
OperationQueue.main.addOperation({[weak self]() -> Void in
self?.contentHandler?(bestAttemptContent);
})
}).resume()
}
extension NotificationService {
static func resizeImage(image: UIImage, newWidth: CGFloat) -> UIImage? {
let scale = newWidth / image.size.width
let newHeight = image.size.height * scale
UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
image.draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
Rich notification with the top and bottom cut off (no head, no feet): screenshot omitted.
An example of the image before the top and bottom were cut off (has head and feet): screenshot omitted.
I couldn't get the full image, but I could get the top half of it by cropping it using this answer:
override func didReceive(_ request: UNNotificationRequest, withContentHandler contentHandler: @escaping (UNNotificationContent) -> Void) {
self.contentHandler = contentHandler
bestAttemptContent = (request.content.mutableCopy() as? UNMutableNotificationContent)
func failEarly() {
contentHandler(request.content)
}
guard let bestAttemptContent = bestAttemptContent else { failEarly(); return }
guard let attachmentUrlString = request.content.userInfo["media-url"] as? String else { failEarly(); return }
guard let url = URL(string: attachmentUrlString) else { failEarly(); return }
URLSession.shared.downloadTask(with: url, completionHandler: { [weak self](tempLocation, response, error) -> Void in
if error != nil { return }
guard let location = tempLocation else { return }
guard let response = response else { return }
do {
let lastPathComponent = response.url?.lastPathComponent ?? ""
var attachmentID = UUID.init().uuidString + lastPathComponent
if response.suggestedFilename != nil {
attachmentID = UUID.init().uuidString + response.suggestedFilename!
}
let tempDict = NSTemporaryDirectory()
let tempFilePath = tempDict + attachmentID
try FileManager.default.moveItem(atPath: location.path, toPath: tempFilePath)
guard let image = UIImage(contentsOfFile: tempFilePath) else { return }
let croppedImage = NotificationService.cropImageInHalf(image: image) // cropping occurs here
guard let imageData = croppedImage.jpegData(compressionQuality: 1.0) else { return }
let fileURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
.appendingPathComponent("pkm", isDirectory: false)
.appendingPathExtension("jpg")
try imageData.write(to: fileURL)
let attachment = try UNNotificationAttachment.init(identifier: "pkm.jpg", url: fileURL, options: nil)
bestAttemptContent.attachments.append(attachment)
}
catch { return }
OperationQueue.main.addOperation({[weak self]() -> Void in
self?.contentHandler?(bestAttemptContent);
})
}).resume()
}
extension NotificationService {
static func cropImageInHalf(image: UIImage) -> UIImage {
let height = CGFloat(image.size.height * 0.5)
let rect = CGRect(x: 0, y: 0, width: image.size.width, height: height)
return cropImage(image: image, toRect: rect)
}
static func cropImage(image:UIImage, toRect rect:CGRect) -> UIImage {
let imageRef:CGImage = image.cgImage!.cropping(to: rect)!
let croppedImage:UIImage = UIImage(cgImage:imageRef)
return croppedImage
}
}

Delete white background from UIImage in Swift 5

I am trying to save two copies of a photo taken with the camera: one is the photo itself, and the other is the photo plus emojis hiding the faces of the people in it.
Right now I only get the original photo plus an image with a white background and the emoji faces, but without the photo behind them.
This is the code I use to put the emojis over the faces:
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
And this is the code I use to save the images:
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
I tried different solutions to delete the white background (or black, depending on whether I pass false or true to the opaque parameter of the image context), but I always get the emoji image on a white background.
Please help me get the emoji image with no white/black background, drawn over the photo that was taken.
My full code here is:
import UIKit
import AVFoundation
import Vision
class cameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCapturePhotoCaptureDelegate {
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var drawings: [CAShapeLayer] = []
private let photoOutput = AVCapturePhotoOutput()
var imageReciber = UIImage()
@IBOutlet weak var showCamera: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
self.addCameraInput()
self.showCameraFeed()
self.getCameraFrames()
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.showCamera.frame
}
func captureOutput(
_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
self.detectFace(in: frame)
}
private func addCameraInput() {
guard let device = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera],
mediaType: .video,
position: .back).devices.first else {
fatalError("No back camera device found, please make sure to run SimpleLaneDetection in an iOS device and not a simulator")
}
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
captureSession.addOutput(photoOutput)
}
private func showCameraFeed() {
self.previewLayer.videoGravity = .resizeAspectFill
self.showCamera.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.showCamera.frame
}
private func getCameraFrames() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
// Add the image captured from the buffer to the image view and give it a slightly rounded border so it looks good.
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
}
Thank you in advance.
After looking into the problem more calmly, I discovered how to solve it.
The problem is that I was trying to render the image to a file before getting it from the video stream. To solve this, I created a new function that executes after the image is taken, and now everything works flawlessly.
func saveEmoji() {
showCamera.backgroundColor = UIColor.clear
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, true, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
var topImage = outputImage
UIImageWriteToSavedPhotosAlbum(topImage!, nil, nil, nil)
topImage = nil
}
The function is called after the first image is saved:
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
saveEmoji()
}

Black frames while converting array of UIImages to Video

I'm trying to convert an array of UIImages to video, but I get a lot of black frames in the resulting file (4 black frames at the beginning, then 3 good frames, then 3 black frames and 2 good frames, and this pattern repeats until the end of the video).
My code is based on this solution, but I believe the main source of the problem is in this part of the code:
func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
//videosizes and path to temp output file
let inputSize = CGSize(width: 568, height: 320)
let outputSize = CGSize(width: 568, height: 320)
var error: NSError?
let documentsPath = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] as! NSString
let videoOutputURL = NSURL(fileURLWithPath: documentsPath.stringByAppendingPathComponent("TempVideo.mov"))!
NSFileManager.defaultManager().removeItemAtURL(videoOutputURL, error: nil)
videoWriter = AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4, error: &error)
if let videoWriter = videoWriter {
let videoSettings: [NSObject : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : outputSize.width,
AVVideoHeightKey : outputSize.height,
]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey : kCVPixelFormatType_32ARGB,
kCVPixelBufferWidthKey : inputSize.width,
kCVPixelBufferHeightKey : inputSize.height,
]
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
for var i = 0; i < self.photoURLs.count - 1; i++ {
var currentFrame = self.photoURLs[i]
var lastFrameTime = CMTimeMake(Int64(i), fps)
var presentationTime = CMTimeAdd(lastFrameTime, frameDuration)
//this one is needed because sometimes videoWriter is not ready, and we have to wait for a while
while videoWriterInput.readyForMoreMediaData == false {
var maxDate = NSDate(timeIntervalSinceNow: 0.5)
var currentRunLoop = NSRunLoop()
currentRunLoop.runUntilDate(maxDate)
}
self.appendPixelBufferForImageAtURL(currentFrame, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime)
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(
domain: kErrorDomain,
code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
}
if let error = error {
failure(error)
}
}
Obviously I'm doing something wrong, but what? I think the problem is here, because some of the images convert without any problems, but there are two more functions for the pixel buffer:
func appendPixelBufferForImageAtURL(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
var pixelBuffer: Unmanaged<CVPixelBuffer>?
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
kCFAllocatorDefault,
pixelBufferAdaptor.pixelBufferPool,
&pixelBuffer
)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer.takeRetainedValue()
fillPixelBufferFromImage(image, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(
managedPixelBuffer,
withPresentationTime: presentationTime
)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBufferRef) {
let imageData = CGDataProviderCopyData(CGImageGetDataProvider(image.CGImage))
let lockStatus:UInt8 = UInt8(CVPixelBufferLockBaseAddress(pixelBuffer, 0))
let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(
pixelData,
Int(568),
Int(320),
8,
Int(8 * 320),
rgbColorSpace,
bitmapInfo
)
var imageDataProvider = CGDataProviderCreateWithCFData(imageData)
var imageRef = CGImageCreateWithJPEGDataProvider(imageDataProvider, nil, true, kCGRenderingIntentDefault)
CGContextDrawImage(context, CGRectMake(0, 0, 568, 320), imageRef)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
So I was able to solve this by rewriting fillPixelBufferFromImage using an example I found here: CVPixelBufferPool Error (kCVReturnInvalidArgument/-6661)
The key changes: the bitmap context now uses CVPixelBufferGetBytesPerRow(pixelBuffer) rather than a hard-coded row stride, and the CGImage is drawn directly instead of being re-decoded through a JPEG data provider.
Here's the Swift 2 - Xcode 7 GM solution that's working for me:
public func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
let inputSize = CGSize(width: 600, height: 600)
let outputSize = CGSize(width: 600, height: 600)
var error: NSError?
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
}catch{
fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
fatalError("AVAssetWriter error")
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : NSNumber(float: Float(outputSize.width)),
AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(float: Float(inputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(float: Float(inputSize.height)),
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
var remainingPhotoURLs = [String](self.photoURLs)
while (videoWriterInput.readyForMoreMediaData && !remainingPhotoURLs.isEmpty) {
let nextPhotoURL = remainingPhotoURLs.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"
])
break
}
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
if let error = error {
failure(error)
}
}
public func appendPixelBufferForImageAtURL(urlString: String, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
if let image = UIImage(contentsOfFile: urlString) {
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
fillPixelBufferFromImage(image.CGImage!, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: CGImage, pixelBuffer: CVPixelBuffer){
let frameSize = CGSizeMake(CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image)))
CVPixelBufferLockBaseAddress(pixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(frameSize.width), Int(frameSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))), image)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
Working project files here:
https://github.com/justinlevi/imagesToVideo
