In my app, I want to generate multiple thumbnails of a video, preferably good-quality thumbnails. My old approach was to loop 15 times and copy a CGImage at a different time on each pass, as shown below:
func generateThumbnails(_ fileURL: URL) {
    let asset = AVAsset(url: fileURL)
    let imageGenerator = AVAssetImageGenerator(asset: asset)
    imageGenerator.apertureMode = .cleanAperture
    imageGenerator.appliesPreferredTrackTransform = true
    let duration = asset.duration
    let seconds = CMTimeGetSeconds(duration)
    let addition = seconds / 15
    var number = 1.0
    do {
        while number < seconds {
            let thumbnailCGImage = try imageGenerator.copyCGImage(at: CMTimeMake(Int64(number), 1), actualTime: nil)
            let image = UIImage(cgImage: thumbnailCGImage)
            thumbnails.append(image)
            number += addition
        }
    } catch {
        return
    }
}
However, after doing some more research, I found it was more logical to just generate thumbnails asynchronously using
let imageGenerator = AVAssetImageGenerator(asset: asset)
imageGenerator.generateCGImagesAsynchronously(forTimes: [NSValue], completionHandler: AVAssetImageGeneratorCompletionHandler)
However, I don't really know what I am supposed to pass in for the [NSValue] array and the completion handler.
I just need an explanation of how to generate thumbnails this way, and whether it's the better approach.
From the documentation for generateCGImagesAsynchronously(forTimes:completionHandler:) (https://developer.apple.com/documentation/avfoundation/avassetimagegenerator/1388100-generatecgimagesasynchronously):
requestedTimes: An array of NSValue objects, each containing a CMTime, specifying the asset times at which an image is requested.
Usage:
let duration = asset.duration
let seconds = CMTimeGetSeconds(duration)
let addition = seconds / 15
var number = 1.0
var times = [NSValue]()
times.append(NSValue(time: CMTimeMake(Int64(number), 1)))

while number < seconds {
    number += addition
    times.append(NSValue(time: CMTimeMake(Int64(number), 1)))
}
struct Formatter {
    // Formats the requested time (in seconds) for logging.
    static let formatter: NumberFormatter = {
        let result = NumberFormatter()
        result.maximumFractionDigits = 2
        return result
    }()
}
imageGenerator.generateCGImagesAsynchronously(forTimes: times) { (requestedTime, cgImage, actualImageTime, status, error) in
    let seconds = CMTimeGetSeconds(requestedTime)
    let time = Formatter.formatter.string(from: NSNumber(value: seconds)) ?? "\(seconds)"

    switch status {
    case .succeeded:
        if let image = cgImage {
            print("Generated image for approximate time: \(time)s")
            let img = UIImage(cgImage: image)
            // do something with `img`
        } else {
            print("Failed to generate a valid image for time: \(time)s")
        }
    case .failed:
        if let error = error {
            print("Failed to generate image with error: \(error) for time: \(time)s")
        } else {
            print("Failed to generate image for time: \(time)s")
        }
    case .cancelled:
        print("Image generation cancelled for time: \(time)s")
    @unknown default:
        break
    }
}
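If the thumbnails need to land at fractional-second positions, a hedged variant (my addition, not part of the answer above) is to build the times with CMTime(seconds:preferredTimescale:), which keeps the fractional part that Int64(number) truncates, and to tighten the generator's tolerances. This sketch reuses seconds, addition and imageGenerator from the code above:

// Sketch only: evenly spaced times that keep their fractional part.
let preciseTimes = stride(from: 0.0, to: seconds, by: addition).map {
    NSValue(time: CMTime(seconds: $0, preferredTimescale: 600))
}
// Zero tolerances make the generator honour the requested times exactly,
// at the cost of slower generation.
imageGenerator.requestedTimeToleranceBefore = .zero
imageGenerator.requestedTimeToleranceAfter = .zero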
Related
I'm making a video by taking snapshots (30 times per second), and I really need to find the best approach to take a snapshot on a background thread with the best possible performance.
I have two approaches in a UIView extension:
extension UIView {

    func snapshotInMain(view: UIView) -> UIImage {
        let render = UIGraphicsImageRenderer(size: view.bounds.size)
        let image = render.image { (context) in
            view.drawHierarchy(in: view.bounds, afterScreenUpdates: true)
        }
        return image
    }

    func snapShotInBackground(viewLayer: CALayer, viewBounds: CGRect) -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: viewBounds)
        return renderer.image { rendererContext in
            viewLayer.render(in: rendererContext.cgContext)
        }
    }
}
The first one, snapshotInMain, executes entirely on the main thread; it freezes the app, but the screenshot itself is taken faster and the resulting video is smoother.
In the second one, snapShotInBackground, the layer and bounds are captured on the main thread but the rendering then runs in the background; however, it is much slower than the main-thread version (the first one).
It works like this:
DispatchQueue.main.async {
    let layer = self.selectedView.layer
    let bounds = self.selectedView.bounds
    DispatchQueue.global(qos: .background).async {
        let image = self.selectedView.snapShotInBackground(viewLayer: layer, viewBounds: bounds)
    }
}
My job really depends on this; I'm pretty stuck here and really need help finding the best option possible. My main requirements are:
1. The app should not freeze.
2. Taking a snapshot should be fast enough to do it 30 times per second.
Thank you! Looking forward to your help
You can use a Timer to throttle the snapshots on the main thread. Here's an example:
@objc
private func beginTapped() {
    print("Start time \(Date.now)")
    frameCount = 0
    let timer = Timer.scheduledTimer(timeInterval: 0.03, target: self, selector: #selector(takeSnapshot), userInfo: nil, repeats: true)
    DispatchQueue.main.asyncAfter(deadline: .now() + 5, execute: {
        timer.invalidate()
        print("Finished at \(Date.now) with \(self.frameCount) frames captured")
    })
}

@objc
private func takeSnapshot() {
    frameCount += 1
    let render = UIGraphicsImageRenderer(size: selectedView.bounds.size)
    let _ = render.image { (context) in
        selectedView.drawHierarchy(in: selectedView.bounds, afterScreenUpdates: true)
    }
}
which results in
Start time 2022-07-01 04:20:01 +0000
Finished at 2022-07-01 04:20:07 +0000 with 180 frames captured
Decreasing the timeInterval to 0.01 yields
Start time 2022-07-01 04:25:30 +0000
Finished at 2022-07-01 04:25:36 +0000 with 506 frames captured
CPU usage peaked around 20% during this time and the app did not lag at all in my trivial example.
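If the Timer approach still drops frames, one alternative worth sketching (my own assumption, not part of the answer above) is CADisplayLink, which fires in step with the screen's refresh and therefore tends to produce more evenly spaced snapshots. The preferredFramesPerSecond value and the method names here are illustrative:

private var displayLink: CADisplayLink?

private func startCapturing() {
    // Fires in sync with the screen refresh; 30 fps matches the stated requirement.
    let link = CADisplayLink(target: self, selector: #selector(takeSnapshot))
    link.preferredFramesPerSecond = 30
    link.add(to: .main, forMode: .common)
    displayLink = link
}

private func stopCapturing() {
    displayLink?.invalidate()
    displayLink = nil
}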
Rather than taking screen snapshots, I think using the image generator is a good option here; it costs CPU rather than memory, which makes for a smoother UI/UX.
import UIKit
import AVFoundation

class VideoController: UIViewController {

    var timer = Timer()
    var secs: Double = 0
    var urls = [URL?]() {
        didSet {
            print(urls.count)
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        everySecond()
    }

    func everySecond() {
        // Trigger frame generation once every second.
        timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(generate), userInfo: nil, repeats: true)
    }

    @objc
    func generate() {
        let url = Bundle.main.url(forResource: "Collection", withExtension: "mov")
        let videoAsset = AVAsset(url: url!)
        let duration = videoAsset.duration.seconds // duration of the video
        if secs < duration {
            // Build 30 evenly spaced times within the current second.
            var timesArray: [NSValue] = []
            for i in 1...30 {
                let t = CMTime(seconds: secs + Double(i) / 30.0, preferredTimescale: 600)
                timesArray.append(NSValue(time: t))
            }
            let generator = AVAssetImageGenerator(asset: videoAsset)
            generator.requestedTimeToleranceBefore = .zero // Optional
            generator.requestedTimeToleranceAfter = .zero  // Optional
            generator.generateCGImagesAsynchronously(forTimes: timesArray) { requestedTime, image, actualTime, result, error in
                if let img = image {
                    let m = UIImage(cgImage: img)
                    // Saved in the documents directory with a unique name.
                    let url = Self.saveImageInDocumentDirectory(image: m, fileName: "\(Date().timeStamp()).jpeg")
                    // The completion handler runs on a background queue, so hop to main before touching shared state.
                    DispatchQueue.main.async {
                        self.urls.append(url) // keep a reference to the saved file
                    }
                }
            }
            secs += 1
        } else {
            timer.invalidate()
        }
    }

    public static func saveImageInDocumentDirectory(image: UIImage, fileName: String) -> URL? {
        let documentsUrl = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
        let fileURL = documentsUrl.appendingPathComponent(fileName)
        if let imageData = image.jpegData(compressionQuality: 0.8) {
            try? imageData.write(to: fileURL, options: .atomic)
            return fileURL
        }
        return nil
    }

    public static func loadImageFromDocumentDirectory(fileName: String) -> UIImage? {
        let documentsUrl = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
        let fileURL = documentsUrl.appendingPathComponent(fileName)
        do {
            let imageData = try Data(contentsOf: fileURL)
            return UIImage(data: imageData)
        } catch {}
        return nil
    }
}
extension Date {
    func timeStamp() -> String {
        let formatter = DateFormatter()
        formatter.dateFormat = "yyyy-MM-dd HH:mm:ss:SSS"
        return formatter.string(from: self)
    }
}
I want to extract a UIImage from a video player at a certain point in time and do some processing on that image.
I have this code:
pausePlayer()
let time = player!.currentTime()
imageFromVideo(url: fileURL(for: movieName!), at: time.seconds) { image in
    let result = self.detectLines(image: image!)
    if let result = result {
        // Display results by handing off to the InferenceViewController.
        DispatchQueue.main.async {
            self.drawNewResults(result: result)
        }
    }
}
and
public func imageFromVideo(url: URL, at time: TimeInterval, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .background).async {
        let asset = AVURLAsset(url: url)
        let assetIG = AVAssetImageGenerator(asset: asset)
        assetIG.appliesPreferredTrackTransform = true
        assetIG.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels

        let cmTime = CMTime(seconds: time, preferredTimescale: CMTimeScale(30.0))
        var newTime = CMTime(seconds: time, preferredTimescale: CMTimeScale(30.0))
        let thumbnailImageRef: CGImage
        do {
            thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: &newTime)
        } catch let error {
            print("Error: \(error)")
            return completion(nil)
        }
        print("Time on click: %f", cmTime.seconds)
        print("Actual time: %f", newTime.seconds)
        DispatchQueue.main.async {
            completion(UIImage(cgImage: thumbnailImageRef))
        }
    }
}
The problem is that there is a big gap between the time I request the image and the time of the image I get. Typically it is something like 0.2 s to 0.4 s, roughly an offset of 7 to 14 frames. So the processing I do and display is just wrong, since I am not working on the image that is actually being observed.
Example:
Time on click: %f 0.8333333333333334
Actual time: %f 1.0666666666666667
Time on click: %f 1.6333333333333333
Actual time: %f 2.1666666666666665
As suggested here: swift: How to take screenshot of AVPlayerLayer()
assetIG.requestedTimeToleranceAfter = CMTime.zero
assetIG.requestedTimeToleranceBefore = CMTime.zero
did the trick.
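Worth noting (my addition, not from the answer above): with both tolerances at zero the generator has to decode from the nearest preceding keyframe up to the exact requested frame, so generation gets noticeably slower. If exact-frame accuracy isn't strictly required, a small non-zero tolerance is a possible compromise; one frame at 30 fps is only an illustrative value:

// Allow the result to be off by at most one frame (at 30 fps) in either direction.
assetIG.requestedTimeToleranceBefore = CMTime(value: 1, timescale: 30)
assetIG.requestedTimeToleranceAfter = CMTime(value: 1, timescale: 30)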
I have a screen recorder that can record two AVPlayers playing simultaneously, but I want to improve the frame rate to 25 frames per second.
I use AVAssetImageGenerator() to take a still and then load this image onto a view hidden underneath the corresponding AVPlayer. I then take a screenshot using UIGraphicsGetImageFromCurrentImageContext(), combining the lot together, and save the images to the app. This happens around 14 times a second. When the recording stops, I use FFMPEG to concatenate all the images into a video at around 30 fps.
The video result looks okay, but I'd like to increase the number of screenshots taken per second so it looks smoother. Any ideas on how I could improve the code to take a few more screenshots per second? I hope this makes sense.
var limit = 2000
var screenshotTaken = 0
var view: UIView?
var screenRecording: Bool = false
var compilingVideo: Bool = false

let leftPlayerUrl: URL?
let leftPlayer: AVPlayer?
let leftPlayerImageView: UIImageView?
let rightPlayerUrl: URL?
let rightPlayer: AVPlayer?
let rightPlayerImageView: UIImageView?

init(view: UIView, leftPlayerUrl: URL, leftPlayer: AVPlayer, leftPlayerImageView: UIImageView, rightPlayerUrl: URL, rightPlayer: AVPlayer, rightPlayerImageView: UIImageView) {
    self.view = view
    self.leftPlayerUrl = leftPlayerUrl
    self.leftPlayer = leftPlayer
    self.leftPlayerImageView = leftPlayerImageView
    self.rightPlayerUrl = rightPlayerUrl
    self.rightPlayer = rightPlayer
    self.rightPlayerImageView = rightPlayerImageView
}
func capture() {
    if screenRecording {
        if limit >= screenshotTaken {
            // The delay should be 0.04 to hit 25 fps, but the max screenshots taken is 16 per second.
            delay(0.07) {
                DispatchQueue.main.async {
                    self.complexScreenshot()
                }
                self.capture()
            }
        } else {
            DebugPrint.DBprint("Screenshot limit reached or recording stopped")
            delegate?.screenShotLimitReached()
        }
    }
}

func delay(_ delay: Double, closure: @escaping () -> ()) {
    DispatchQueue.main.asyncAfter(deadline: .now() + delay, execute: closure)
}
@objc func complexScreenshot() {
    guard let url = leftPlayerUrl else { return }
    let asset = AVAsset(url: url)
    let imageGenerator = AVAssetImageGenerator(asset: asset)
    imageGenerator.maximumSize = CGSize(width: 640, height: 480)
    imageGenerator.requestedTimeToleranceAfter = CMTime.zero
    imageGenerator.requestedTimeToleranceBefore = CMTime.zero
    if let thumb: CGImage = try? imageGenerator.copyCGImage(at: leftPlayer?.currentTime() ?? CMTime.zero, actualTime: nil) {
        let videoImage = UIImage(cgImage: thumb)
        self.leftPlayerImageView?.image = videoImage
    }

    guard let url2 = rightPlayerUrl else { return }
    let asset2 = AVAsset(url: url2)
    let imageGenerator2 = AVAssetImageGenerator(asset: asset2)
    imageGenerator2.maximumSize = CGSize(width: 640, height: 480)
    imageGenerator2.requestedTimeToleranceAfter = CMTime.zero
    imageGenerator2.requestedTimeToleranceBefore = CMTime.zero
    if let thumb2: CGImage = try? imageGenerator2.copyCGImage(at: rightPlayer?.currentTime() ?? CMTime.zero, actualTime: nil) {
        let videoImage = UIImage(cgImage: thumb2)
        self.rightPlayerImageView?.image = videoImage
    }

    guard let bounds = view?.bounds else { return }
    UIGraphicsBeginImageContextWithOptions(bounds.size, view?.isOpaque ?? true, 0.0)
    self.view?.drawHierarchy(in: bounds, afterScreenUpdates: true)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    self.leftPlayerImageView?.image = nil
    self.rightPlayerImageView?.image = nil

    if image != nil {
        DispatchQueue.global(qos: .utility).async { [weak self] in
            self?.saveScreenshot(image: image!, number: self!.screenshotTaken)
        }
    }
    screenshotTaken = screenshotTaken + 1
}
func saveScreenshot(image: UIImage, number: Int) {
    let number = String(format: "%04d", number)
    let filePath = URL(fileURLWithPath: self.mainPath).appendingPathComponent("Temp/image_\(number).jpg")
    autoreleasepool {
        if let data = image.jpegData(compressionQuality: 0.4),
           !self.fileManager.fileExists(atPath: filePath.path) {
            do {
                try data.write(to: filePath)
            } catch {
                print("Error saving file: ", error)
            }
        }
    }
}
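One further idea, along the lines of the advice later on this page about not creating the asset and generator inside a loop (this is my own sketch, not tested against this code): building a new AVAsset and AVAssetImageGenerator on every call to complexScreenshot(), roughly 14 times per second, is relatively expensive. Creating one generator per player up front and reusing it should leave more of each interval for the screenshot itself:

// Assumed helper: build a generator once per player and reuse it in complexScreenshot().
private func makeGenerator(for url: URL) -> AVAssetImageGenerator {
    let generator = AVAssetImageGenerator(asset: AVAsset(url: url))
    generator.maximumSize = CGSize(width: 640, height: 480)
    generator.requestedTimeToleranceBefore = CMTime.zero
    generator.requestedTimeToleranceAfter = CMTime.zero
    return generator
}

lazy var leftGenerator: AVAssetImageGenerator? = {
    guard let url = leftPlayerUrl else { return nil }
    return makeGenerator(for: url)
}()

lazy var rightGenerator: AVAssetImageGenerator? = {
    guard let url = rightPlayerUrl else { return nil }
    return makeGenerator(for: url)
}()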
There is a video file with a duration of 3 seconds. I need to create 30 frames as UIImages, capturing an image every 0.1 seconds.
I tried to use AVAssetImageGenerator and CMTimeMake, but I always get 30 identical images, or 15 of one frame and 15 of another.
Please help me understand how to make this kind of slideshow from the video, or suggest a better way to do it.
Please see the code below:
static func generate_Thumbnails(forVideoWithURL url: URL) -> [UIImage]? {
    let asset = AVAsset(url: url)
    var result: [UIImage] = []
    let assetImgGenerator = AVAssetImageGenerator(asset: asset)
    assetImgGenerator.appliesPreferredTrackTransform = true
    for i in 1...30 {
        let time: CMTime = CMTimeMake(value: Int64(i), timescale: 10)
        do {
            let img: CGImage = try assetImgGenerator.copyCGImage(at: time, actualTime: nil)
            let frameImg: UIImage = UIImage(cgImage: img)
            result.append(frameImg)
        } catch {
            //return nil
        }
    }
    return result
}
I tried the solution from Amin Benarieb, and it seems to work:
static func toImages(fromVideoUrl url: URL) -> [UIImage]? {
    let asset = AVAsset(url: url)
    guard let reader = try? AVAssetReader(asset: asset) else { return nil }
    let videoTrack = asset.tracks(withMediaType: .video).first!
    let outputSettings = [String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)]
    let trackReaderOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: outputSettings)
    reader.add(trackReaderOutput)
    reader.startReading()
    var images = [UIImage]()
    while reader.status == .reading {
        autoreleasepool {
            if let sampleBuffer = trackReaderOutput.copyNextSampleBuffer() {
                if let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
                    let ciImage = CIImage(cvImageBuffer: imageBuffer)
                    images.append(UIImage(ciImage: ciImage))
                }
            }
        }
    }
    return images
}
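A small usage note from me: the reader-based extraction is synchronous and decodes every frame into memory, so for anything longer than a short clip it's worth calling it off the main thread and being aware that the resulting array can get large. A sketch, assuming the method lives on a type called VideoHelper and that videoURL and show(_:) exist in the caller:

DispatchQueue.global(qos: .userInitiated).async {
    // Decode all frames in the background; this can take a while and use a lot of memory.
    let frames = VideoHelper.toImages(fromVideoUrl: videoURL) ?? []
    DispatchQueue.main.async {
        // Hand the frames back to the UI, e.g. to drive the slideshow.
        show(frames)
    }
}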
I haven't read the docs for AVAssetImageGenerator, but in practice I've only ever been able to generate 1 image per second. So you should be able to get an image at 1, 2, and 3 seconds (but not 30). Here is the code I use to generate images, which is very similar to yours.
private func getPreviewImage(forURL url: URL, atSeconds seconds: Double) -> UIImage? {
    let asset = AVURLAsset(url: url)
    let generator = AVAssetImageGenerator(asset: asset)
    generator.appliesPreferredTrackTransform = true
    let timestamp = CMTime(seconds: seconds, preferredTimescale: 100)
    do {
        let imageRef = try generator.copyCGImage(at: timestamp, actualTime: nil)
        return UIImage(cgImage: imageRef)
    } catch let error as NSError {
        print("Image generation failed with error \(error)")
        return nil
    }
}
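For what it's worth, the once-per-second behaviour comes from the generator's default time tolerances, which let it snap to nearby (often keyframe) times. Setting both tolerances to zero, as in the tolerance answer further up this page, makes requests 0.1 s apart return distinct frames; here is a hedged variant of the helper above:

private func getExactPreviewImage(forURL url: URL, atSeconds seconds: Double) -> UIImage? {
    let asset = AVURLAsset(url: url)
    let generator = AVAssetImageGenerator(asset: asset)
    generator.appliesPreferredTrackTransform = true
    // Zero tolerance: seek to the exact requested frame (slower, but accurate).
    generator.requestedTimeToleranceBefore = .zero
    generator.requestedTimeToleranceAfter = .zero
    let timestamp = CMTime(seconds: seconds, preferredTimescale: 600)
    guard let imageRef = try? generator.copyCGImage(at: timestamp, actualTime: nil) else {
        print("Image generation failed for time \(seconds)")
        return nil
    }
    return UIImage(cgImage: imageRef)
}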
I am following this code to get all frames from a video. In that link, the author gets a frame at a specific time, but I need all of the frames. Here is my code:
var mutableVideoURL = NSURL()
var videoFrames = [UIImage]()

let asset: AVAsset = AVAsset(url: self.mutableVideoURL as URL)
let mutableVideoDuration = CMTimeGetSeconds(asset.duration)
print("-----Mutable video duration = \(mutableVideoDuration)")
let mutableVideoDurationIntValue = Int(mutableVideoDuration)
print("-----Int value of mutable video duration = \(mutableVideoDurationIntValue)")

for index in 0..<mutableVideoDurationIntValue {
    self.generateFrames(url: self.mutableVideoURL, fromTime: Float64(index))
}

func generateFrames(url: NSURL, fromTime: Float64) {
    let asset: AVAsset = AVAsset(url: url as URL)
    let assetImgGenerate: AVAssetImageGenerator = AVAssetImageGenerator(asset: asset)
    assetImgGenerate.appliesPreferredTrackTransform = true
    let time: CMTime = CMTimeMakeWithSeconds(fromTime, 600)
    var img: CGImage?
    do {
        img = try assetImgGenerate.copyCGImage(at: time, actualTime: nil)
    } catch {
    }
    if img != nil {
        let frameImg: UIImage = UIImage(cgImage: img!)
        UIImageWriteToSavedPhotosAlbum(frameImg, nil, nil, nil) // I saved here to check
        videoFrames.append(frameImg)
        print("-----Array of video frames *** \(videoFrames)")
    } else {
        print("error !!!")
    }
}
I tested this code with two videos (5 seconds and 3.45 minutes long). It works perfectly with the short video, but with the longer one the app crashes and NSLog shows: Message from debugger: Terminated due to memory issue
Any assistance would be appreciated.
When generating more than one frame, Apple recommends using the method:
generateCGImagesAsynchronously(forTimes:completionHandler:)
Still, if you prefer to follow your current approach, there are a couple of improvements you could make to reduce memory usage:
You are instantiating AVAsset and AVAssetImageGenerator inside the loop; you could instantiate them just once and pass them to the generateFrames method.
Remove the line
UIImageWriteToSavedPhotosAlbum(frameImg, nil, nil, nil) // I saved here to check
because you are saving every frame to the photo album, which takes extra memory.
Final result could look like this:
var videoFrames: [UIImage] = [UIImage]()
let asset: AVAsset = AVAsset(url: self.mutableVideoURL as URL)
let assetImgGenerate: AVAssetImageGenerator = AVAssetImageGenerator(asset: asset)
assetImgGenerate.appliesPreferredTrackTransform = true
let duration: Float64 = CMTimeGetSeconds(asset.duration)
let durationInt: Int = Int(duration)

for index: Int in 0 ..< durationInt {
    generateFrames(assetImgGenerate: assetImgGenerate, fromTime: Float64(index))
}

func generateFrames(assetImgGenerate: AVAssetImageGenerator, fromTime: Float64) {
    let time: CMTime = CMTimeMakeWithSeconds(fromTime, 600)
    let cgImage: CGImage?
    do {
        cgImage = try assetImgGenerate.copyCGImage(at: time, actualTime: nil)
    } catch {
        cgImage = nil
    }
    guard let img: CGImage = cgImage else {
        return
    }
    let frameImg: UIImage = UIImage(cgImage: img)
    videoFrames.append(frameImg)
}
Update for Swift 4.2
var videoUrl: URL // use your own url
var frames: [UIImage] = []
private var generator: AVAssetImageGenerator!

func getAllFrames() {
    let asset: AVAsset = AVAsset(url: self.videoUrl)
    let duration: Float64 = CMTimeGetSeconds(asset.duration)
    self.generator = AVAssetImageGenerator(asset: asset)
    self.generator.appliesPreferredTrackTransform = true
    self.frames = []
    for index: Int in 0 ..< Int(duration) {
        self.getFrame(fromTime: Float64(index))
    }
    self.generator = nil
}

private func getFrame(fromTime: Float64) {
    let time: CMTime = CMTimeMakeWithSeconds(fromTime, preferredTimescale: 600)
    let image: CGImage
    do {
        image = try self.generator.copyCGImage(at: time, actualTime: nil)
    } catch {
        return
    }
    self.frames.append(UIImage(cgImage: image))
}