If I have an MPMoviePlayerController in Swift:
let mp = MPMoviePlayerController(contentURL: url)
Is there a way I can get the number of frames within the video located at url? If not, is there some other way to determine the frame count?
I don't think MPMoviePlayerController can help you.
Use an AVAssetReader and count the number of CMSampleBuffers it returns. You can configure it not to decode the frames at all, effectively just parsing the file, so it should be fast and memory-efficient.
Something like
var asset = AVURLAsset(URL: url, options: nil)
var reader = AVAssetReader(asset: asset, error: nil)
var videoTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
var readerOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: nil) // NB: nil, should give you raw frames
reader.addOutput(readerOutput)
reader.startReading()
var nFrames = 0
while true {
var sampleBuffer = readerOutput.copyNextSampleBuffer()
if sampleBuffer == nil {
break
}
nFrames++
}
println("Num frames: \(nFrames)")
Sorry if that's not idiomatic; I don't know Swift.
Swift 5
func getNumberOfFrames(url: URL) -> Int {
let asset = AVURLAsset(url: url, options: nil)
do {
let reader = try AVAssetReader(asset: asset)
//AVAssetReader(asset: asset, error: nil)
let videoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let readerOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: nil) // NB: nil, should give you raw frames
reader.add(readerOutput)
reader.startReading()
var nFrames = 0
while true {
let sampleBuffer = readerOutput.copyNextSampleBuffer()
if sampleBuffer == nil {
break
}
nFrames += 1
}
print("Num frames: \(nFrames)")
return nFrames
}catch {
print("Error: \(error)")
}
return 0
}
You could also estimate the total frame count from the frames per second and the duration (exact for constant-frame-rate video).
var player: AVPlayer?
var playerController = AVPlayerViewController()
var videoFPS: Int = 0
var totalFrames: Int?
guard let videoURL = URL(string: "") else { return } // put your own video URL in place of ""
player = AVPlayer(url: videoURL)
playerController.player = player
guard player?.currentItem?.asset != nil else {
return
}
let asset = self.player?.currentItem?.asset
let tracks = asset!.tracks(withMediaType: .video)
let fps = tracks.first?.nominalFrameRate
let duration = self.player?.currentItem?.duration
let durationSeconds = CMTimeGetSeconds(duration!)
self.videoFPS = lround(Double(fps!))
self.totalFrames = lround(Double(self.videoFPS) * durationSeconds)
Related
There is a video file with a duration of 3 seconds. I need to create 30 frames as UIImages, capturing an image every 0.1 seconds.
I tried to use AVAssetImageGenerator and CMTimeMake, but I always get 30 similar images, or 15 similar images followed by 15 other similar ones.
Please help me understand how to make this kind of slideshow from the video, or suggest a better way to do it.
Please see the code below:
static func generate_Thumbnails(forVideoWithURL url : URL) -> [UIImage]? {
let asset = AVAsset(url: url)
var result: [UIImage] = []
let assetImgGenerator = AVAssetImageGenerator(asset: asset)
assetImgGenerator.appliesPreferredTrackTransform = true
for i in 1...30 {
let time: CMTime = CMTimeMake(value: Int64(i), timescale: 10)
do {
let img: CGImage = try assetImgGenerator.copyCGImage(at: time, actualTime: nil)
let frameImg: UIImage = UIImage(cgImage: img)
result.append(frameImg)
} catch {
//return nil
}
}
return result
}
I tried the solution from Amin Benarieb, and it seems to work:
static func toImages(fromVideoUrl url: URL) -> [UIImage]? {
let asset = AVAsset(url: url)
guard let reader = try? AVAssetReader(asset: asset) else { return nil }
let videoTrack = asset.tracks(withMediaType: .video).first!
let outputSettings = [String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)]
let trackReaderOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: outputSettings)
reader.add(trackReaderOutput)
reader.startReading()
var images = [UIImage]()
while reader.status == .reading {
autoreleasepool {
if let sampleBuffer = trackReaderOutput.copyNextSampleBuffer() {
if let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
let ciImage = CIImage(cvImageBuffer: imageBuffer)
images.append(UIImage(ciImage: ciImage))
}
}
}
}
return images
}
I haven't read the docs for AVAssetImageGenerator, but in practice I've only ever been able to generate 1 image per second. So you should be able to get an image at 1, 2, and 3 seconds (but not 30). Here is the code I use to generate images, which is very similar to yours.
private func getPreviewImage(forURL url: URL, atSeconds seconds: Double) -> UIImage? {
let asset = AVURLAsset(url: url)
let generator = AVAssetImageGenerator(asset: asset)
generator.appliesPreferredTrackTransform = true
let timestamp = CMTime(seconds: seconds, preferredTimescale: 100)
do {
let imageRef = try generator.copyCGImage(at: timestamp, actualTime: nil)
return UIImage(cgImage: imageRef)
}
catch let error as NSError
{
print("Image generation failed with error \(error)")
return nil
}
}
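One note worth adding: AVAssetImageGenerator's time tolerances are quite loose by default, which may be why images only come out roughly one second apart. If you need frames closer together, tightening the tolerances is worth trying; a minimal tweak to the generator above (at the cost of slower, exact seeks):
generator.requestedTimeToleranceBefore = .zero
generator.requestedTimeToleranceAfter = .zero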
I am using AVAssetWriter to compress video, but it takes 20 to 25 seconds to compress a 160 MB video down to 12 MB. I want a fixed bitrate of 1600 kbps and the video's natural frame size. Is there any way to compress more quickly?
func compressFile(urlToCompress: URL, outputURL: URL, completion: @escaping (URL) -> Void) {
//video file to make the asset
var audioFinished = false
var videoFinished = false
let asset = AVAsset(url: urlToCompress);
//create asset reader
do{
assetReader = try AVAssetReader(asset: asset)
} catch{
assetReader = nil
}
guard let reader = assetReader else{
fatalError("Could not initialize asset reader; the try/catch above probably failed")
}
let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first!
let audioTrack = asset.tracks(withMediaType: AVMediaType.audio).first!
let videoReaderSettings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB]
// MARK: ADJUST BIT RATE OF VIDEO HERE
let videoSettings:[String:Any] = [
AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey: self.bitrate] as Any,
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoHeightKey: videoTrack.naturalSize.height,//352,
AVVideoWidthKey: videoTrack.naturalSize.width //640//
]
let assetReaderVideoOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoReaderSettings)
let assetReaderAudioOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: nil)
if reader.canAdd(assetReaderVideoOutput){
reader.add(assetReaderVideoOutput)
}else{
fatalError("Couldn't add video output reader")
}
if reader.canAdd(assetReaderAudioOutput){
reader.add(assetReaderAudioOutput)
}else{
fatalError("Couldn't add audio output reader")
}
let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: nil)
let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
videoInput.transform = videoTrack.preferredTransform
//we need to add samples to the video input
let videoInputQueue = DispatchQueue(label: "videoQueue")
let audioInputQueue = DispatchQueue(label: "audioQueue")
do{
assetWriter = try AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mov)
}catch{
assetWriter = nil
}
guard let writer = assetWriter else{
fatalError("assetWriter was nil")
}
writer.shouldOptimizeForNetworkUse = true
writer.add(videoInput)
writer.add(audioInput)
writer.startWriting()
reader.startReading()
writer.startSession(atSourceTime: CMTime.zero)
let closeWriter:()->Void = {
if (audioFinished && videoFinished){
self.assetWriter?.finishWriting(completionHandler: {
self.checkFileSize(sizeUrl: (self.assetWriter?.outputURL)!, message: "The file size of the compressed file is: ")
completion((self.assetWriter?.outputURL)!)
})
self.assetReader?.cancelReading()
}
}
audioInput.requestMediaDataWhenReady(on: audioInputQueue) {
while(audioInput.isReadyForMoreMediaData){
let sample = assetReaderAudioOutput.copyNextSampleBuffer()
if (sample != nil){
audioInput.append(sample!)
}else{
audioInput.markAsFinished()
DispatchQueue.main.async {
audioFinished = true
closeWriter()
}
break;
}
}
}
videoInput.requestMediaDataWhenReady(on: videoInputQueue) {
//request data here
while(videoInput.isReadyForMoreMediaData){
let sample = assetReaderVideoOutput.copyNextSampleBuffer()
if (sample != nil){
videoInput.append(sample!)
}else{
videoInput.markAsFinished()
DispatchQueue.main.async {
videoFinished = true
closeWriter()
}
break;
}
}
}
}
Edit: Using the above method, the video is converted at a different bitrate, not at the defined bitrate. Why?
Any direction would be appreciated.
Thanks in advance
This is a non-trivial problem, and there is no "simple" answer to encoding faster. If you are unfamiliar with this area, you are better off picking bit-rates and codecs that encode faster. You have to assume the library is doing its job. So, there are a few things you can do:
Make the source file smaller -- or chunk it
Run it on a cloud service which has beefier hardware
You can choose another codec and a lower bit-rate, width, and height; see the sketch after this list. (https://developer.apple.com/documentation/avfoundation/avvideocodectype)
You can potentially spin up multiple threads and encode faster that way, but I doubt it would gain you much.
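On the codec and bit-rate point, a hedged sketch of cheaper output settings might look like the following; the halved dimensions and the 1600 kbps figure are illustrative assumptions, and videoTrack is the track from the question's compressFile:
let fasterVideoSettings: [String: Any] = [
    AVVideoCodecKey: AVVideoCodecType.h264,
    AVVideoWidthKey: videoTrack.naturalSize.width / 2,    // smaller frames encode faster
    AVVideoHeightKey: videoTrack.naturalSize.height / 2,
    AVVideoCompressionPropertiesKey: [AVVideoAverageBitRateKey: 1_600_000] as Any
]
Also note that AVVideoAverageBitRateKey is only a target average; the encoder is free to vary the instantaneous rate, which is likely why the output does not land exactly on the defined bitrate.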
I am following this code to get all frames from a video. In that link the author gets a frame at a specific time, but I need to get all frames. Here is my code...
var mutableVideoURL = NSURL()
var videoFrames = [UIImage]()
let asset : AVAsset = AVAsset(url: self.mutableVideoURL as URL)
let mutableVideoDuration = CMTimeGetSeconds(asset.duration)
print("-----Mutable video duration = \(mutableVideoDuration)")
let mutableVideoDurationIntValue = Int(mutableVideoDuration)
print("-----Int value of mutable video duration = \(mutableVideoDurationIntValue)")
for index in 0..<mutableVideoDurationIntValue {
self.generateFrames(url: self.mutableVideoURL, fromTime: Float64(index))
}
func generateFrames(url : NSURL, fromTime:Float64) {
let asset: AVAsset = AVAsset(url: url as URL)
let assetImgGenerate : AVAssetImageGenerator = AVAssetImageGenerator(asset: asset)
assetImgGenerate.appliesPreferredTrackTransform = true
let time : CMTime = CMTimeMakeWithSeconds(fromTime, 600)
var img: CGImage?
do {
img = try assetImgGenerate.copyCGImage(at:time, actualTime: nil)
} catch {
}
if img != nil {
let frameImg: UIImage = UIImage(cgImage: img!)
UIImageWriteToSavedPhotosAlbum(frameImg, nil, nil, nil)//I saved here to check
videoFrames.append(frameImg)
print("-----Array of video frames *** \(videoFrames)")
} else {
print("error !!!")
}
}
I tested this code with 2 videos (5 seconds and 3.45 minutes long). It works perfectly with the short video (5 seconds), but with the long one (3.45 minutes) the app terminates and NSLog shows: Message from debugger: Terminated due to memory issue
Any assistance would be appreciated.
When generating more than 1 frame, Apple recommends using the method:
generateCGImagesAsynchronously(forTimes:completionHandler:)
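A minimal sketch of that call, in current Swift syntax (assuming asset is the AVAsset from the question and one frame per second, as in your loop; the handler fires once per requested time, so collect the images there rather than up front):
let generator = AVAssetImageGenerator(asset: asset)
generator.appliesPreferredTrackTransform = true
let durationInSeconds = Int(CMTimeGetSeconds(asset.duration))
let times = (0..<durationInSeconds).map { NSValue(time: CMTime(value: CMTimeValue($0), timescale: 1)) }
generator.generateCGImagesAsynchronously(forTimes: times) { _, cgImage, _, result, _ in
    if result == .succeeded, let cgImage = cgImage {
        let frame = UIImage(cgImage: cgImage)
        print(frame.size) // replace with your own handling, e.g. append to an array
    }
}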
Still, if you prefer to follow your current approach, there are a couple of improvements you could make to reduce memory usage:
You are instantiating AVAsset and AVAssetImageGenerator inside the loop; instantiate them just once and pass them to the generateFrames method.
Remove the line
UIImageWriteToSavedPhotosAlbum(frameImg, nil, nil, nil)//I saved here to check
because it saves every frame to the photo album, which takes extra memory.
The final result could look like this:
var videoFrames: [UIImage] = []
let asset:AVAsset = AVAsset(url:self.mutableVideoURL as URL)
let assetImgGenerate:AVAssetImageGenerator = AVAssetImageGenerator(asset:asset)
assetImgGenerate.appliesPreferredTrackTransform = true
let duration:Float64 = CMTimeGetSeconds(asset.duration)
let durationInt:Int = Int(duration)
for index:Int in 0 ..< durationInt
{
generateFrames(
assetImgGenerate:assetImgGenerate,
fromTime:Float64(index))
}
func generateFrames(
assetImgGenerate:AVAssetImageGenerator,
fromTime:Float64)
{
let time:CMTime = CMTimeMakeWithSeconds(fromTime, 600)
let cgImage:CGImage?
do
{
cgImage = try assetImgGenerate.copyCGImage(at:time, actualTime:nil)
}
catch
{
cgImage = nil
}
guard
let img:CGImage = cgImage
else
{
return
}
let frameImg:UIImage = UIImage(cgImage:img)
videoFrames.append(frameImg)
}
Update for Swift 4.2
var videoUrl:URL // use your own url
var frames: [UIImage] = []
private var generator:AVAssetImageGenerator!
func getAllFrames() {
let asset:AVAsset = AVAsset(url:self.videoUrl)
let duration:Float64 = CMTimeGetSeconds(asset.duration)
self.generator = AVAssetImageGenerator(asset:asset)
self.generator.appliesPreferredTrackTransform = true
self.frames = []
for index:Int in 0 ..< Int(duration) {
self.getFrame(fromTime:Float64(index))
}
self.generator = nil
}
private func getFrame(fromTime:Float64) {
let time:CMTime = CMTimeMakeWithSeconds(fromTime, preferredTimescale:600)
let image:CGImage
do {
try image = self.generator.copyCGImage(at:time, actualTime:nil)
} catch {
return
}
self.frames.append(UIImage(cgImage:image))
}
I capture video using AVCaptureSession. The session preset is:
session!.sessionPreset = AVCaptureSessionPreset1280x720
and I extract images from the video using this code:
func videoThumbnails(url: NSURL ){
let asset = AVAsset(URL: url)
let imageGenerator = AVAssetImageGenerator(asset: asset)
imageGenerator.appliesPreferredTrackTransform = true
imageGenerator.maximumSize = CGSizeMake(720, 1280)
imageGenerator.requestedTimeToleranceAfter = kCMTimeZero
var time = asset.duration
let totalTime = time
var frames = 0.0
let singleFrame = Double(time.seconds) / 4
while (frames < totalTime.seconds) {
frames += singleFrame
time.value = (Int64(frames)) * Int64(totalTime.timescale)
do {
let imageRef = try imageGenerator.copyCGImageAtTime(time, actualTime: nil)
self.sendImage.append(UIImage(CGImage: imageRef))
}
catch let error as NSError
{
print("Image generation failed with error \(error)")
}
}
self.performSegueWithIdentifier("showCapturedImages", sender: nil)
}
In the image above, the extracted frame is shown over the video so you can see the color difference. What I want is to extract the exact image from the video. How can I do that?
I'm trying to record segments of audio and recombine them without producing a gap in audio.
The eventual goal is to also have video, but I've found that audio itself creates gaps when combined with ffmpeg -f concat -i list.txt -c copy out.mp4
If I put the audio in an HLS playlist, there are also gaps, so I don't think this is unique to ffmpeg.
The idea is that samples come in continuously, and my controller routes samples to the proper AVAssetWriter. How do I eliminate gaps in audio?
import Foundation
import UIKit
import AVFoundation
class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
var closingAudioInput: AVAssetWriterInput?
var closingAssetWriter: AVAssetWriter?
var currentAudioInput: AVAssetWriterInput?
var currentAssetWriter: AVAssetWriter?
var nextAudioInput: AVAssetWriterInput?
var nextAssetWriter: AVAssetWriter?
var videoHelper: VideoHelper?
var startTime: NSTimeInterval = 0
let closeAssetQueue: dispatch_queue_t = dispatch_queue_create("closeAssetQueue", nil);
override func viewDidLoad() {
super.viewDidLoad()
startTime = NSDate().timeIntervalSince1970
createSegmentWriter()
videoHelper = VideoHelper()
videoHelper!.delegate = self
videoHelper!.startSession()
NSTimer.scheduledTimerWithTimeInterval(1, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
}
func createSegmentWriter() {
print("Creating segment writer at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
let outputPath = OutputFileNameHelper.instance.pathForOutput()
OutputFileNameHelper.instance.incrementSegmentIndex()
try? NSFileManager.defaultManager().removeItemAtPath(outputPath)
nextAssetWriter = try! AVAssetWriter(URL: NSURL(fileURLWithPath: outputPath), fileType: AVFileTypeMPEG4)
nextAssetWriter!.shouldOptimizeForNetworkUse = true
let audioSettings: [String:AnyObject] = EncodingSettings.AUDIO
nextAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
nextAudioInput!.expectsMediaDataInRealTime = true
nextAssetWriter?.addInput(nextAudioInput!)
nextAssetWriter!.startWriting()
}
func closeWriterIfNecessary() {
if closing && audioFinished {
closing = false
audioFinished = false
let outputFile = closingAssetWriter?.outputURL.pathComponents?.last
closingAssetWriter?.finishWritingWithCompletionHandler() {
let delta = NSDate().timeIntervalSince1970 - self.startTime
print("segment \(outputFile!) finished at t=\(delta)")
}
self.closingAudioInput = nil
self.closingAssetWriter = nil
}
}
var audioFinished = false
var closing = false
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
if let nextWriter = nextAssetWriter {
if nextWriter.status.rawValue != 0 {
if (currentAssetWriter != nil) {
closing = true
}
var sampleTiming: CMSampleTimingInfo = kCMTimingInfoInvalid
CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &sampleTiming)
print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
closingAssetWriter = currentAssetWriter
closingAudioInput = currentAudioInput
currentAssetWriter = nextAssetWriter
currentAudioInput = nextAudioInput
nextAssetWriter = nil
nextAudioInput = nil
currentAssetWriter?.startSessionAtSourceTime(sampleTiming.presentationTimeStamp)
}
}
if let _ = captureOutput as? AVCaptureVideoDataOutput {
} else if let _ = captureOutput as? AVCaptureAudioDataOutput {
captureAudioSample(sampleBuffer)
}
dispatch_async(closeAssetQueue) {
self.closeWriterIfNecessary()
}
}
func printTimingInfo(sampleBuffer: CMSampleBufferRef, prefix: String) {
var sampleTiming: CMSampleTimingInfo = kCMTimingInfoInvalid
CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &sampleTiming)
let presentationTime = Double(sampleTiming.presentationTimeStamp.value) / Double(sampleTiming.presentationTimeStamp.timescale)
print("\(prefix):\(presentationTime)")
}
func captureAudioSample(sampleBuffer: CMSampleBufferRef) {
printTimingInfo(sampleBuffer, prefix: "A")
if (closing && !audioFinished) {
if closingAudioInput?.readyForMoreMediaData == true {
closingAudioInput?.appendSampleBuffer(sampleBuffer)
}
closingAudioInput?.markAsFinished()
audioFinished = true
} else {
if currentAudioInput?.readyForMoreMediaData == true {
currentAudioInput?.appendSampleBuffer(sampleBuffer)
}
}
}
}
With packet formats like AAC you have silent priming frames (a.k.a. encoder delay) at the beginning and remainder frames at the end (when your audio length is not a multiple of the packet size). In your case there are 2112 of them at the beginning of every file. Priming and remainder frames make it impossible to concatenate the files without transcoding them, so you can't really blame ffmpeg -c copy for not producing seamless output.
I'm not sure where this leaves you with video - obviously audio is synced to the video, even in the presence of priming frames.
It all depends on how you intend to concatenate the final audio (and eventually video). If you're doing it yourself using AVFoundation, then you can detect and account for priming/remainder frames using
CMGetAttachment(buffer, kCMSampleBufferAttachmentKey_TrimDurationAtStart, NULL)
CMGetAttachment(audioBuffer, kCMSampleBufferAttachmentKey_TrimDurationAtEnd, NULL)
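In Swift that might look roughly like the following sketch (current Swift syntax; the attachment value, when present, is a CFDictionary representation of a CMTime):
func trimDurations(of buffer: CMSampleBuffer) -> (start: CMTime, end: CMTime) {
    var start = CMTime.zero
    var end = CMTime.zero
    if let raw = CMGetAttachment(buffer, key: kCMSampleBufferAttachmentKey_TrimDurationAtStart, attachmentModeOut: nil) {
        start = CMTimeMakeFromDictionary((raw as! CFDictionary))
    }
    if let raw = CMGetAttachment(buffer, key: kCMSampleBufferAttachmentKey_TrimDurationAtEnd, attachmentModeOut: nil) {
        end = CMTimeMakeFromDictionary((raw as! CFDictionary))
    }
    return (start, end)
}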
As a short-term solution, you can switch to a non-"packetised" format to get gapless files that can be concatenated (with ffmpeg).
e.g.
AVFormatIDKey: kAudioFormatAppleIMA4, fileType: AVFileTypeAIFC, suffix ".aifc" or
AVFormatIDKey: kAudioFormatLinearPCM, fileType: AVFileTypeWAVE, suffix ".wav"
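For example, a hedged sketch of the writer setup for the linear PCM / WAVE route, in current Swift syntax (the sample rate and channel count are assumptions, so match them to your capture session, and segmentURL is a hypothetical ".wav" output path):
let lpcmSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatLinearPCM,   // non-packetised, so no priming/remainder frames
    AVSampleRateKey: 44_100,
    AVNumberOfChannelsKey: 1,
    AVLinearPCMBitDepthKey: 16,
    AVLinearPCMIsFloatKey: false,
    AVLinearPCMIsBigEndianKey: false,
    AVLinearPCMIsNonInterleaved: false
]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: lpcmSettings)
let writer = try AVAssetWriter(outputURL: segmentURL, fileType: .wav) // call from a throwing context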
p.s. you can see priming & remainder frames and packet sizes using the ubiquitous afinfo tool.
afinfo chunk.mp4
Data format: 2 ch, 44100 Hz, 'aac ' (0x00000000) 0 bits/channel, 0 bytes/packet, 1024 frames/packet, 0 bytes/frame
...
audio 39596 valid frames + 2112 priming + 276 remainder = 41984
...
Not sure if this helps you, but if you have a bunch of MP4s you can use this code to combine them:
func mergeAudioFiles(audioFileUrls: NSArray, callback: (url: NSURL?, error: NSError?)->()) {
// Create the audio composition
let composition = AVMutableComposition()
// Merge
for (var i = 0; i < audioFileUrls.count; i++) {
let compositionAudioTrack :AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
let asset = AVURLAsset(URL: audioFileUrls[i] as! NSURL)
let track = asset.tracksWithMediaType(AVMediaTypeAudio)[0]
let timeRange = CMTimeRange(start: CMTimeMake(0, 600), duration: track.timeRange.duration)
try! compositionAudioTrack.insertTimeRange(timeRange, ofTrack: track, atTime: composition.duration)
}
// Create output url
let format = NSDateFormatter()
format.dateFormat="yyyy-MM-dd-HH-mm-ss"
let currentFileName = "recording-\(format.stringFromDate(NSDate()))-merge.m4a"
print(currentFileName)
let documentsDirectory = NSFileManager.defaultManager().URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)[0]
let outputUrl = documentsDirectory.URLByAppendingPathComponent(currentFileName)
print(outputUrl.absoluteString)
// Export it
let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A)
assetExport?.outputFileType = AVFileTypeAppleM4A
assetExport?.outputURL = outputUrl
assetExport?.exportAsynchronouslyWithCompletionHandler({ () -> Void in
switch assetExport!.status {
case AVAssetExportSessionStatus.Failed:
callback(url: nil, error: assetExport?.error)
default:
callback(url: assetExport?.outputURL, error: nil)
}
})
}