I want to extract a UIImage from a video player at a certain point in time and do some processing on that image.
I have this code:
pausePlayer()
let time = player!.currentTime()
imageFromVideo(url: fileURL(for: movieName!), at: time.seconds) { image in
    let result = self.detectLines(image: image!)
    if let result = result {
        // Display results by handing off to the InferenceViewController.
        DispatchQueue.main.async {
            self.drawNewResults(result: result)
        }
    }
}
and
public func imageFromVideo(url: URL, at time: TimeInterval, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .background).async {
        let asset = AVURLAsset(url: url)
        let assetIG = AVAssetImageGenerator(asset: asset)
        assetIG.appliesPreferredTrackTransform = true
        assetIG.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels
        let cmTime = CMTime(seconds: time, preferredTimescale: CMTimeScale(30.0))
        var newTime = CMTime(seconds: time, preferredTimescale: CMTimeScale(30.0))
        let thumbnailImageRef: CGImage
        do {
            thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: &newTime)
        } catch let error {
            print("Error: \(error)")
            return completion(nil)
        }
        print("Time on click: \(cmTime.seconds)")
        print("Actual time: \(newTime.seconds)")
        DispatchQueue.main.async {
            completion(UIImage(cgImage: thumbnailImageRef))
        }
    }
}
The problem is that there is a large gap between the time at which I request the image and the time of the image I get back. Typically it is something like 0.2s to 0.4s, roughly an offset of 7 to 14 frames. So the processing I do and display is just wrong, since I am not working on the image the user actually observed.
Example:
Time on click: 0.8333333333333334
Actual time: 1.0666666666666667
Time on click: 1.6333333333333333
Actual time: 2.1666666666666665
As suggested here: swift: How to take screenshot of AVPlayerLayer()
assetIG.requestedTimeToleranceAfter = CMTime.zero
assetIG.requestedTimeToleranceBefore = CMTime.zero
did the trick.
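For reference, here is the helper above with the zero tolerances folded in (a sketch; the name exactImageFromVideo is mine, and note that exact-frame requests are slower, since the generator must decode forward from the nearest keyframe instead of just returning it):
import AVFoundation
import UIKit

public func exactImageFromVideo(url: URL, at time: TimeInterval, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let asset = AVURLAsset(url: url)
        let assetIG = AVAssetImageGenerator(asset: asset)
        assetIG.appliesPreferredTrackTransform = true
        // Zero tolerance forces decoding up to the requested frame, so the
        // returned image matches the requested time instead of a keyframe.
        assetIG.requestedTimeToleranceBefore = .zero
        assetIG.requestedTimeToleranceAfter = .zero
        let cmTime = CMTime(seconds: time, preferredTimescale: 600)
        do {
            let cgImage = try assetIG.copyCGImage(at: cmTime, actualTime: nil)
            DispatchQueue.main.async { completion(UIImage(cgImage: cgImage)) }
        } catch {
            print("Error: \(error)")
            DispatchQueue.main.async { completion(nil) }
        }
    }
}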
Related
I have a screen recorder that can record two AVPlayers playing simultaneously, but I want to increase the frame rate to 25 fps.
I use AVAssetImageGenerator() to take a still and then load this image onto a view hidden underneath the corresponding AVPlayer. I then take a screenshot using UIGraphicsGetImageFromCurrentImageContext(), combining the lot together, and save the images to the app. This happens around 14 times a second. When the recording stops, I use FFmpeg to concatenate all the images into a video at around 30 fps.
The video result looks okay, but I'd like to increase the number of screenshots I take per second so it looks smoother. Any ideas on how I could improve the code to take a few more screenshots per second? I hope this makes sense.
var limit = 2000
var screenshotTaken = 0
var view: UIView?
var screenRecording: Bool = false
var compilingVideo: Bool = false

let leftPlayerUrl: URL?
let leftPlayer: AVPlayer?
let leftPlayerImageView: UIImageView?
let rightPlayerUrl: URL?
let rightPlayer: AVPlayer?
let rightPlayerImageView: UIImageView?

init(view: UIView, leftPlayerUrl: URL, leftPlayer: AVPlayer, leftPlayerImageView: UIImageView, rightPlayerUrl: URL, rightPlayer: AVPlayer, rightPlayerImageView: UIImageView) {
    self.view = view
    self.leftPlayerUrl = leftPlayerUrl
    self.leftPlayer = leftPlayer
    self.leftPlayerImageView = leftPlayerImageView
    self.rightPlayerUrl = rightPlayerUrl
    self.rightPlayer = rightPlayer
    self.rightPlayerImageView = rightPlayerImageView
}

func capture() {
    if screenRecording {
        if limit >= screenshotTaken {
            // The delay should be 0.04 to hit 25 fps, but the max screenshots taken is 16 per second.
            delay(0.07) {
                DispatchQueue.main.async {
                    self.complexScreenshot()
                }
                self.capture()
            }
        } else {
            DebugPrint.DBprint("Screenshot limit reached or recording stopped")
            delegate?.screenShotLimitReached()
        }
    }
}

func delay(_ delay: Double, closure: @escaping () -> ()) {
    DispatchQueue.main.asyncAfter(deadline: .now() + delay, execute: closure)
}

@objc func complexScreenshot() {
    guard let url = leftPlayerUrl else { return }
    let asset = AVAsset(url: url)
    let imageGenerator = AVAssetImageGenerator(asset: asset)
    imageGenerator.maximumSize = CGSize(width: 640, height: 480)
    imageGenerator.requestedTimeToleranceAfter = CMTime.zero
    imageGenerator.requestedTimeToleranceBefore = CMTime.zero
    if let thumb: CGImage = try? imageGenerator.copyCGImage(at: leftPlayer?.currentTime() ?? CMTime.zero, actualTime: nil) {
        let videoImage = UIImage(cgImage: thumb)
        self.leftPlayerImageView?.image = videoImage
    }

    guard let url2 = rightPlayerUrl else { return }
    let asset2 = AVAsset(url: url2)
    let imageGenerator2 = AVAssetImageGenerator(asset: asset2)
    imageGenerator2.maximumSize = CGSize(width: 640, height: 480)
    imageGenerator2.requestedTimeToleranceAfter = CMTime.zero
    imageGenerator2.requestedTimeToleranceBefore = CMTime.zero
    if let thumb2: CGImage = try? imageGenerator2.copyCGImage(at: rightPlayer?.currentTime() ?? CMTime.zero, actualTime: nil) {
        let videoImage = UIImage(cgImage: thumb2)
        self.rightPlayerImageView?.image = videoImage
    }

    guard let bounds = view?.bounds else { return }
    UIGraphicsBeginImageContextWithOptions(bounds.size, view?.isOpaque ?? true, 0.0)
    self.view?.drawHierarchy(in: bounds, afterScreenUpdates: true)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    self.leftPlayerImageView?.image = nil
    self.rightPlayerImageView?.image = nil

    if let image = image {
        DispatchQueue.global(qos: .utility).async { [weak self] in
            guard let self = self else { return }
            self.saveScreenshot(image: image, number: self.screenshotTaken)
        }
    }
    screenshotTaken = screenshotTaken + 1
}

func saveScreenshot(image: UIImage, number: Int) {
    let number = String(format: "%04d", number)
    let filePath = URL(fileURLWithPath: self.mainPath).appendingPathComponent("Temp/image_\(number).jpg")
    autoreleasepool {
        if let data = image.jpegData(compressionQuality: 0.4),
           !self.fileManager.fileExists(atPath: filePath.path) {
            do {
                try data.write(to: filePath)
            } catch {
                print("Error saving file: ", error)
            }
        }
    }
}
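One small change that may help (an editorial sketch, not from the thread; startCapturing and captureTimer are my names): the recursive delay(0.07) schedules each capture only after the previous one is queued, so scheduling jitter and the cost of complexScreenshot() accumulate. A repeating timer fires on a fixed cadence instead, though drawHierarchy(in:afterScreenUpdates:) is expensive and will still cap the real rate:
var captureTimer: Timer?

func startCapturing(interval: TimeInterval = 1.0 / 25.0) {
    captureTimer?.invalidate()
    // A repeating timer keeps a fixed cadence; if one tick's work overruns,
    // the next fire is skipped rather than pushed later, so the average rate
    // stays closer to the target than with recursive asyncAfter.
    captureTimer = Timer.scheduledTimer(withTimeInterval: interval, repeats: true) { [weak self] _ in
        guard let self = self, self.screenRecording, self.screenshotTaken <= self.limit else {
            self?.captureTimer?.invalidate()
            return
        }
        self.complexScreenshot()
    }
}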
There is a video file with a duration of 3 seconds. I need to create 30 frames as UIImages, capturing an image every 0.1 seconds.
I tried to use AVAssetImageGenerator and CMTimeMake, but I always get 30 similar images, or 15 of one image and 15 of another.
Please help me understand how to make this kind of slideshow from the video. Or maybe there is some better way to do it.
Please see the code below:
static func generate_Thumbnails(forVideoWithURL url: URL) -> [UIImage]? {
    let asset = AVAsset(url: url)
    var result: [UIImage] = []
    let assetImgGenerator = AVAssetImageGenerator(asset: asset)
    assetImgGenerator.appliesPreferredTrackTransform = true
    for i in 1...30 {
        let time: CMTime = CMTimeMake(value: Int64(i), timescale: 10)
        do {
            let img: CGImage = try assetImgGenerator.copyCGImage(at: time, actualTime: nil)
            let frameImg: UIImage = UIImage(cgImage: img)
            result.append(frameImg)
        } catch {
            //return nil
        }
    }
    return result
}
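An editorial note, not part of the thread: the duplicated frames are most likely the generator's default time tolerances, which let it return the nearest keyframe, and a 3-second clip may contain only one or two keyframes. The same fix as in the first question above should apply; a sketch reusing the question's variable names:
// A minimal sketch: the same loop, with zero tolerances so the generator
// decodes the exact frame instead of snapping to the nearest keyframe.
assetImgGenerator.requestedTimeToleranceBefore = .zero
assetImgGenerator.requestedTimeToleranceAfter = .zero
for i in 1...30 {
    // 0.1s, 0.2s, ... 3.0s, expressed as i/10
    let time = CMTimeMake(value: Int64(i), timescale: 10)
    if let img = try? assetImgGenerator.copyCGImage(at: time, actualTime: nil) {
        result.append(UIImage(cgImage: img))
    }
}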
I tried the solution from Amin Benarieb, and it seems to work:
static func toImages(fromVideoUrl url: URL) -> [UIImage]? {
    let asset = AVAsset(url: url)
    guard let reader = try? AVAssetReader(asset: asset) else { return nil }
    let videoTrack = asset.tracks(withMediaType: .video).first!
    let outputSettings = [String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)]
    let trackReaderOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: outputSettings)
    reader.add(trackReaderOutput)
    reader.startReading()
    var images = [UIImage]()
    while reader.status == .reading {
        autoreleasepool {
            if let sampleBuffer = trackReaderOutput.copyNextSampleBuffer() {
                if let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
                    let ciImage = CIImage(cvImageBuffer: imageBuffer)
                    images.append(UIImage(ciImage: ciImage))
                }
            }
        }
    }
    return images
}
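One caveat with this reader-based approach (my note, not from the thread): UIImage(ciImage:) has no CGImage backing, so image.cgImage is nil and APIs that expect one can fail. If that matters, render each frame through a shared CIContext first; a sketch:
import CoreImage
import UIKit

// A single shared context; creating one per frame would be expensive.
private static let ciContext = CIContext()

static func cgBackedImage(from ciImage: CIImage) -> UIImage? {
    guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}
Appending cgBackedImage(from: ciImage) instead of UIImage(ciImage: ciImage) in the loop above yields CGImage-backed frames.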
I haven't read the docs for AVAssetImageGenerator, but in practice I've only ever been able to generate one image per second. So you should be able to get an image at 1, 2, and 3 seconds (but not 30 distinct frames). Here is the code I use to generate images, which is very similar to yours.
private func getPreviewImage(forURL url: URL, atSeconds seconds: Double) -> UIImage? {
    let asset = AVURLAsset(url: url)
    let generator = AVAssetImageGenerator(asset: asset)
    generator.appliesPreferredTrackTransform = true
    let timestamp = CMTime(seconds: seconds, preferredTimescale: 100)
    do {
        let imageRef = try generator.copyCGImage(at: timestamp, actualTime: nil)
        return UIImage(cgImage: imageRef)
    } catch let error as NSError {
        print("Image generation failed with error \(error)")
        return nil
    }
}
In my app, I want to generate multiple thumbnails of a video, preferably good-quality thumbnails. My old approach was to loop 15 times, copying a CGImage at a different time on each pass, as shown below:
func generateThumbnails(_ fileURL: URL) {
    let asset = AVAsset(url: fileURL)
    let imageGenerator = AVAssetImageGenerator(asset: asset)
    imageGenerator.apertureMode = AVAssetImageGeneratorApertureMode.cleanAperture
    imageGenerator.appliesPreferredTrackTransform = true
    let duration = asset.duration
    let seconds = CMTimeGetSeconds(duration)
    let addition = seconds / 15
    var number = 1.0
    do {
        while number < seconds {
            let thumbnailCGImage = try imageGenerator.copyCGImage(at: CMTimeMake(Int64(number), 1), actualTime: nil)
            let image = UIImage(cgImage: thumbnailCGImage)
            thumbnails.append(image)
            number += addition
        }
    } catch {
        return
    }
}
However, after doing some more research, I found it was more logical to generate thumbnails asynchronously using:
let imageGenerator = AVAssetImageGenerator(asset: asset)
imageGenerator.generateCGImagesAsynchronously(forTimes: [NSValue], completionHandler: AVAssetImageGeneratorCompletionHandler)
However, I don't really know what I am supposed to pass for the [NSValue] array and the completion handler.
I just need an explanation of how to generate thumbnails this way, and whether it's the better approach.
https://developer.apple.com/documentation/avfoundation/avassetimagegenerator/1388100-generatecgimagesasynchronously
requestedTimes: An array of NSValue objects, each containing a CMTime, specifying the asset times at which an image is requested.
Usage:
let duration = asset.duration
let seconds = CMTimeGetSeconds(duration)
let addition = seconds / 15
var number = 1.0
var times = [NSValue]()
// Use a fine-grained timescale; CMTimeMake(Int64(number), 1) would truncate
// the fractional seconds and collapse neighboring times onto the same frame.
times.append(NSValue(time: CMTime(seconds: number, preferredTimescale: 600)))
while number < seconds {
    number += addition
    times.append(NSValue(time: CMTime(seconds: number, preferredTimescale: 600)))
}
imageGenerator.generateCGImagesAsynchronously(forTimes: times) { (requestedTime, cgImage, actualImageTime, status, error) in
    // Format the requested offset into the video for logging. (Converting it
    // to a Date and running it through a DateFormatter, as some examples do,
    // would print a calendar date, not a time within the asset.)
    let seconds = CMTimeGetSeconds(requestedTime)
    let time = String(format: "%.2fs", seconds)
    switch status {
    case .succeeded:
        if let image = cgImage {
            print("Generated image for approximate time: \(time)")
            let img = UIImage(cgImage: image)
            // do something with `img`
        } else {
            print("Failed to generate a valid image for time: \(time)")
        }
    case .failed:
        if let error = error {
            print("Failed to generate image with error: \(error) for time: \(time)")
        } else {
            print("Failed to generate image for time: \(time)")
        }
    case .cancelled:
        print("Image generation cancelled for time: \(time)")
    }
}
I am developing a video-based application in Swift 3, where I have a video URL and a range slider scaled to the video duration; the user can select any minimum and maximum value from the slider. If, say, the user selects a minimum of 3 seconds and a maximum of 7 seconds, I need to generate a video thumbnail image for that duration. I am using AVAssetImageGenerator for this, and tried both of the snippets below:
func createThumbnailOfVideoFromFileURL(_ strVideoURL: URL) -> UIImage? {
    let asset = AVAsset(url: strVideoURL)
    let assetImgGenerate: AVAssetImageGenerator = AVAssetImageGenerator(asset: asset)
    assetImgGenerate.appliesPreferredTrackTransform = true
    let time = CMTimeMake(1, 30)
    let img = try? assetImgGenerate.copyCGImage(at: time, actualTime: nil)
    guard let cgImage = img else { return nil }
    let frameImg = UIImage(cgImage: cgImage)
    return frameImg
}
func generateThumbnailForUrl(vidUrl: URL) -> UIImage {
    let asset = AVURLAsset(url: vidUrl, options: nil)
    let imgGenerator = AVAssetImageGenerator(asset: asset)
    var thmbnlImg = UIImage()
    do {
        let cgImage = try imgGenerator.copyCGImage(at: CMTimeMake(0, 1), actualTime: nil)
        thmbnlImg = UIImage(cgImage: cgImage)
        thmbnlImg = thmbnlImg.imageRotatedByDegrees(degrees: 90.0, flip: false)
    } catch {
        print(error)
    }
    // !! check the error before proceeding
    return thmbnlImg
}
But the problem is that I get the same thumbnail image from both methods, because I am not setting the time in either of them. How can I pass the minimum and maximum values so that each duration produces a different thumbnail image? Please help me resolve my problem. Thank you!
Edit: I tried to set the time like this:
let time: CMTime = CMTimeMakeWithSeconds(rangeSlider!.lowerValue, 1)
Then I get different thumbnail images, but for some slider ranges I get a nil thumbnail image as well. Does anyone have an idea how to choose the preferredTimescale value in CMTimeMakeWithSeconds?
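An editorial aside on that last question, based on how CMTime works rather than on the thread: the timescale is the denominator of the rational time, so CMTimeMakeWithSeconds(value, 1) rounds to whole seconds, which is why nearby slider values collapse onto the same thumbnail. A timescale of 600 is a common choice (it is a multiple of 24, 25, and 30 fps) and keeps sub-second precision. A minimal sketch, assuming asset is the AVAsset for the video:
// 600 ticks per second preserves fractional slider values.
let requested = CMTimeMakeWithSeconds(rangeSlider!.lowerValue, 600)
// Requesting a time past the end of the asset makes copyCGImage throw,
// which is one likely cause of the nil thumbnails at the top of the range.
let time = CMTimeMinimum(requested, asset.duration)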
Try this code
static func generateThumbnail(videoUrl: String) -> UIImage? {
    do {
        let url = URL(string: videoUrl)
        let asset = AVURLAsset(url: url!)
        let imageGenerator = AVAssetImageGenerator(asset: asset)
        imageGenerator.appliesPreferredTrackTransform = true
        let cgImage = try imageGenerator.copyCGImage(at: CMTime(seconds: 2.0, preferredTimescale: 60),
                                                     actualTime: nil)
        return UIImage(cgImage: cgImage)
    } catch {
        print(error.localizedDescription)
        return nil
    }
}
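To connect this answer to the slider, the fixed 2.0-second timestamp can be parameterized; a sketch (the atSeconds parameter and the clamping are my additions, not part of the answer above):
static func generateThumbnail(videoUrl: String, atSeconds seconds: Double) -> UIImage? {
    guard let url = URL(string: videoUrl) else { return nil }
    let asset = AVURLAsset(url: url)
    let imageGenerator = AVAssetImageGenerator(asset: asset)
    imageGenerator.appliesPreferredTrackTransform = true
    // Clamp to the asset's duration; requesting past the end throws.
    let requested = CMTime(seconds: seconds, preferredTimescale: 600)
    let time = CMTimeMinimum(requested, asset.duration)
    do {
        let cgImage = try imageGenerator.copyCGImage(at: time, actualTime: nil)
        return UIImage(cgImage: cgImage)
    } catch {
        print(error.localizedDescription)
        return nil
    }
}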
Assume your trimmed video URL is videoURL. After successfully trimming a video, add this code snippet. It extracts an image from the trimmed video at each second (so if the trimmed video is 10 seconds long, it extracts 10 images, one per second, and saves them all in an array named videoFrames). Afterwards you can do whatever you want with these images. You can also show an activity indicator while this process is running. Hope this helps.
var videoFrames = [UIImage]()
let asset: AVAsset = AVAsset(url: videoURL as URL)
let videoDuration = CMTimeGetSeconds(asset.duration)
let integerValueOFVideoDuration = Int(videoDuration)

//start activity indicator here
for index in 0..<integerValueOFVideoDuration + 1 {
    self.generateFrames(url: videoURL, fromTime: Float64(index))
}

func generateFrames(url: NSURL, fromTime: Float64) {
    if videoFrames.count == integerValueOFVideoDuration {
        //end activity indicator here
        return
    }
    let asset: AVAsset = AVAsset(url: url as URL)
    let assetImgGenerate: AVAssetImageGenerator = AVAssetImageGenerator(asset: asset)
    assetImgGenerate.maximumSize = CGSize(width: 300, height: 300)
    assetImgGenerate.appliesPreferredTrackTransform = true
    let time: CMTime = CMTimeMakeWithSeconds(fromTime, 600)
    var img: CGImage?
    do {
        img = try assetImgGenerate.copyCGImage(at: time, actualTime: nil)
    } catch {
    }
    if img != nil {
        let frameImg: UIImage = UIImage(cgImage: img!)
        videoFrames.append(frameImg)
    } else {
        //return nil
    }
}
I'm trying to extract frames as UIImages from a video in Swift. I found several Objective-C solutions, but I'm having trouble finding anything in Swift. Assuming the following is correct, can someone either help me convert it to Swift or give me their own take on how to do this?
Source:
Grabbing the first frame of a video from UIImagePickerController?
- (UIImage *)imageFromVideo:(NSURL *)videoURL atTime:(NSTimeInterval)time {
    AVURLAsset *asset = [[AVURLAsset alloc] initWithURL:videoURL options:nil];
    NSParameterAssert(asset);
    AVAssetImageGenerator *assetIG =
        [[AVAssetImageGenerator alloc] initWithAsset:asset];
    assetIG.appliesPreferredTrackTransform = YES;
    assetIG.apertureMode = AVAssetImageGeneratorApertureModeEncodedPixels;

    CGImageRef thumbnailImageRef = NULL;
    CFTimeInterval thumbnailImageTime = time;
    NSError *igError = nil;
    thumbnailImageRef =
        [assetIG copyCGImageAtTime:CMTimeMake(thumbnailImageTime, 60)
                        actualTime:NULL
                             error:&igError];

    if (!thumbnailImageRef)
        NSLog(@"thumbnailImageGenerationError %@", igError);

    UIImage *image = thumbnailImageRef
        ? [[UIImage alloc] initWithCGImage:thumbnailImageRef]
        : nil;

    return image;
}
It actually did work. Here is the Swift translation:
func imageFromVideo(url: URL, at time: TimeInterval) -> UIImage? {
    let asset = AVURLAsset(url: url)
    let assetIG = AVAssetImageGenerator(asset: asset)
    assetIG.appliesPreferredTrackTransform = true
    assetIG.apertureMode = AVAssetImageGeneratorApertureModeEncodedPixels
    let cmTime = CMTime(seconds: time, preferredTimescale: 60)
    let thumbnailImageRef: CGImage
    do {
        thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: nil)
    } catch let error {
        print("Error: \(error)")
        return nil
    }
    return UIImage(cgImage: thumbnailImageRef)
}
But remember that this function is synchronous, so it's better not to call it on the main queue.
You can do either this:
DispatchQueue.global(qos: .background).async {
    let image = self.imageFromVideo(url: url, at: 0)
    DispatchQueue.main.async {
        self.imageView.image = image
    }
}
Or use generateCGImagesAsynchronously instead of copyCGImage.
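A minimal sketch of that alternative, reusing the url and imageView from the snippet above (the times and the handler body are illustrative, not from the answer):
let generator = AVAssetImageGenerator(asset: AVURLAsset(url: url))
generator.appliesPreferredTrackTransform = true
let times = [NSValue(time: CMTime(seconds: 0, preferredTimescale: 600))]
generator.generateCGImagesAsynchronously(forTimes: times) { _, cgImage, _, result, error in
    guard result == .succeeded, let cgImage = cgImage else {
        print("Error: \(String(describing: error))")
        return
    }
    // The completion handler may run off the main thread; hop to main for UI.
    DispatchQueue.main.async {
        self.imageView.image = UIImage(cgImage: cgImage)
    }
}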
Here's a Swift 5 alternative to Dmitry's solution, so you don't have to worry about which queue you're on:
public func imageFromVideo(url: URL, at time: TimeInterval, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .background).async {
        let asset = AVURLAsset(url: url)
        let assetIG = AVAssetImageGenerator(asset: asset)
        assetIG.appliesPreferredTrackTransform = true
        assetIG.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels
        let cmTime = CMTime(seconds: time, preferredTimescale: 60)
        let thumbnailImageRef: CGImage
        do {
            thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: nil)
        } catch let error {
            print("Error: \(error)")
            return completion(nil)
        }
        DispatchQueue.main.async {
            completion(UIImage(cgImage: thumbnailImageRef))
        }
    }
}
Here's how to use it:
imageFromVideo(url: videoUrl, at: 0) { image in
    // Do something with the image here
}
You can do this easily on iOS. Below is a code snippet on how to do so with Swift.
let url = Bundle.main.url(forResource: "video_name", withExtension: "mp4")
let videoAsset = AVAsset(url: url!)

let t1 = CMTime(value: 1, timescale: 1)
let t2 = CMTime(value: 4, timescale: 1)
let t3 = CMTime(value: 8, timescale: 1)
let timesArray = [
    NSValue(time: t1),
    NSValue(time: t2),
    NSValue(time: t3)
]

let generator = AVAssetImageGenerator(asset: videoAsset)
generator.requestedTimeToleranceBefore = .zero
generator.requestedTimeToleranceAfter = .zero
generator.generateCGImagesAsynchronously(forTimes: timesArray) { requestedTime, image, actualTime, result, error in
    // `image` is nil when generation fails or is cancelled, so unwrap it
    // instead of force-unwrapping.
    guard result == .succeeded, let cgImage = image else { return }
    let img = UIImage(cgImage: cgImage)
}
You can find the demo code here and the medium article here.
Here's an async/await version of @Dmitry's answer, for those who don't like completion handlers:
func imageFromVideo(url: URL, at time: TimeInterval) async throws -> UIImage {
    try await withCheckedThrowingContinuation { continuation in
        DispatchQueue.global(qos: .background).async {
            let asset = AVURLAsset(url: url)
            let assetIG = AVAssetImageGenerator(asset: asset)
            assetIG.appliesPreferredTrackTransform = true
            assetIG.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels
            let cmTime = CMTime(seconds: time, preferredTimescale: 60)
            let thumbnailImageRef: CGImage
            do {
                thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: nil)
            } catch {
                continuation.resume(throwing: error)
                return
            }
            continuation.resume(returning: UIImage(cgImage: thumbnailImageRef))
        }
    }
}
Usage:
let vidUrl = <#your url#>
do {
    let firstFrame = try await imageFromVideo(url: vidUrl, at: 0)
    // do something with image
} catch {
    // handle error
}
Or like this, if you're in an async throwing function (note the function must be async as well as throws, since it awaits):
func someThrowingFunc() async throws {
    let vidUrl = <#your url#>
    let firstFrame = try await imageFromVideo(url: vidUrl, at: 0)
    // do something with image
}