Hi, I have been trying to figure out how to implement movie looping in GPUImage2, but have been unsuccessful so far. The MovieInput class in GPUImage2 uses AVAssetReader to play back movie files, so I researched ways to loop AVAssetReader. I found this question on Stack Overflow dealing with the topic: AVFoundation to reproduce a video loop
The best answer was:
"AVAssetReader doesn't support seeking or restarting, it is essentially a sequential decoder. You have to create a new AVAssetReader object to read the same samples again."
I tried to figure out how to connect the old asset reader to a new one, but I was not very successful and it crashed every time.
I was advised to try something like this, but I am not exactly sure how to write the function generateAssetReader.
public func start() {
    self.assetReader = generateAssetReader(asset: asset, readAudio: readAudio, videoOutputSettings: videoOutputSettings, audioOutputSettings: audioOutputSettings)
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if output.mediaType == AVMediaTypeVideo {
                    readerVideoTrackOutput = output
                }
            }
            while self.assetReader.status == .reading {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if self.assetReader.status == .completed {
                self.assetReader.cancelReading()
                self.assetReader = nil
                if self.loop {
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}
Does anyone have a clue how to solve this looping problem? This is a link to the entire code of the MovieInput class:
https://github.com/BradLarson/GPUImage2/blob/master/framework/Source/iOS/MovieInput.swift
I found the answer, in case anyone is wondering.
public func createReader() -> AVAssetReader {
    var assetRead: AVAssetReader!
    do {
        assetRead = try AVAssetReader(asset: self.asset)
        let outputSettings: [String: AnyObject] = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: Int32(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange))]
        let readerVideoTrackOutput = AVAssetReaderTrackOutput(track: self.asset.tracks(withMediaType: AVMediaTypeVideo)[0], outputSettings: outputSettings)
        readerVideoTrackOutput.alwaysCopiesSampleData = false
        assetRead.add(readerVideoTrackOutput)
    } catch {
        // Silently swallowing the error leaves assetRead nil and the return below will crash; at minimum, log it.
        print("Couldn't create asset reader: \(error)")
    }
    return assetRead
}
public func start() {
    self.assetReader = createReader()
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if output.mediaType == AVMediaTypeVideo {
                    readerVideoTrackOutput = output
                }
            }
            while self.assetReader.status == .reading {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if self.assetReader.status == .completed {
                self.assetReader.cancelReading()
                if self.loop {
                    // Restart movie processing with a fresh reader
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}
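The key difference from my earlier failed attempt is that each pass through start() builds a brand-new AVAssetReader via createReader(); since AVAssetReader cannot seek or restart, the finished reader is simply cancelled and discarded rather than reused.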
I am trying to record input from the microphone and use an AmplitudeTap on the same input. I can't find any examples of how to do this correctly, and I get the following exception when calling the start function:
Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: nullptr == Tap()'
When I use NodeRecorder or AmplitudeTap separately, they work fine. The error happens when I try to use both NodeRecorder and AmplitudeTap and initialize them with the same engine.input node.
Here is my code:
func setup() {
    if mixer == nil {
        mixer = Mixer()
    }
    guard let input = engine.input else {
        return
    }
    if recorder == nil {
        do {
            recorder = try NodeRecorder(node: input)
        } catch let err {
            print("Error: \(err)")
        }
    }
    if silencer == nil {
        let silencer = Fader(input, gain: 0.0)
        self.silencer = silencer
    }
    if !mixer.hasInput(silencer) {
        mixer.addInput(silencer)
    }
    engine.output = mixer
    tracker = AmplitudeTap(input) { amplitude in
        DispatchQueue.main.async {
            self.update(amplitude)
        }
    }
}
func release() {
    mixer.removeAllInputs()
    engine.output = nil
    tracker.dispose()
}
func start() {
    do {
        tracker.start()
        try engine.start()
        NodeRecorder.removeTempFiles()
        try recorder?.record() // <- here the exception occurs
    } catch let err {
        print(err)
    }
}
func stop() {
    engine.stop()
    tracker.stop()
    recorder?.stop()
    if let file = recorder?.audioFile {
        print("playing recorded file")
        player.file = file
        player.play()
    }
}
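A possible explanation, offered as an assumption rather than a confirmed fix: NodeRecorder and AmplitudeTap each call installTap(onBus:) on the node they are given, and AVAudioEngine allows only one tap per bus, which matches the "nullptr == Tap()" assertion. A sketch of a workaround is to point the AmplitudeTap at a different node in the same chain, for example the Fader that is already wired into the mixer, so engine.input is only tapped once:
if let silencer = self.silencer {
    // Tap the Fader instead of engine.input; NodeRecorder keeps its tap on the input node.
    tracker = AmplitudeTap(silencer) { amplitude in
        DispatchQueue.main.async {
            self.update(amplitude)
        }
    }
}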
I've implemented Apple's SFSpeechRecognizer to convert speech to text. I have multiple audio recordings, so I'm creating multiple SFSpeechRecognizer instances so that they are all converted in parallel, and I've also used a DispatchGroup so that I get a completion when the last one ends. But I keep getting the error kAFAssistantErrorDomain error 209.
private var dispatchGroup = DispatchGroup()

allURLs.forEach { (singleURL) in
    DispatchQueue.main.async {
        thisSelf.dispatchGroup.enter()
        let request = SFSpeechURLRecognitionRequest(url: singleURL)
        guard let recognizer = SFSpeechRecognizer() else {
            thisSelf.dispatchGroup.leave()
            completion(.failure(thisSelf.speechReconInitError))
            return
        }
        request.shouldReportPartialResults = false
        if !recognizer.isAvailable {
            thisSelf.dispatchGroup.leave()
            return
        }
        recognizer.recognitionTask(with: request) { [weak thisSelf] (result, error) in
            guard let reconSelf = thisSelf else { return }
            if let error = error {
                completion(.failure(error))
                if let nsError = error as NSError? {
                    print("Error while transcribing audio: \(singleURL.path), Code, Domain, Description: \(nsError.code), \(nsError.domain), \(nsError.localizedDescription)")
                } else {
                    print("Error while transcribing audio: \(singleURL.path), Error: \(error.localizedDescription)")
                }
                reconSelf.dispatchGroup.leave()
            } else if let transcriptionResult = result, transcriptionResult.isFinal {
                transcribedText += transcriptionResult.bestTranscription.formattedString
                reconSelf.dispatchGroup.leave()
            }
        }
        // note: this registers a notify block on every iteration of the forEach
        thisSelf.dispatchGroup.notify(queue: .main) {
            if !transcribedText.isEmpty {
                completion(transcribedText)
            }
        }
    }
}
And if I transcribe only one audio file at a time, I don't get any error.
TIA
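One thing worth trying, sketched under the assumption (not documented by Apple) that error 209 comes from running many recognition tasks at once: process the files sequentially instead of in parallel. transcribe(url:completion:) below is a hypothetical wrapper around a single SFSpeechURLRecognitionRequest like the one above.
func transcribeSequentially(_ urls: [URL],
                            accumulated: String = "",
                            completion: @escaping (String) -> Void) {
    guard let first = urls.first else {
        completion(accumulated) // all files processed
        return
    }
    // transcribe(url:completion:) is a hypothetical single-file wrapper
    transcribe(url: first) { text in
        self.transcribeSequentially(Array(urls.dropFirst()),
                                    accumulated: accumulated + text,
                                    completion: completion)
    }
}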
I have some image files I need to get from a drone using the DJI SDK, but the way I've done it doesn't seem ideal. I was wondering how I would be able to do this inside a for loop instead. Here's the code:
func getImages(with file: DJIMediaFile?) {
    guard let file = file else { return }
    file.fetchPreview { (error) in
        if error != nil {
            print(String(describing: error?.localizedDescription))
            return
        }
        guard let image = file.preview else { print("No preview"); return }
        self.images.append(image)
        self.imageView.image = image
        self.index += 1
        if self.index < self.mediaList.count {
            self.getImages(with: self.mediaList[self.index])
        } else {
            // [...]
        }
    }
}
Any help from someone who's familiar with the DJI SDK and Swift would be appreciated (perhaps I should have used some other API method?). Thanks!
This is mostly a Swift question rather than a DJI SDK issue, and more related to code review. But it's simply:
func getNewImages() {
    for file in self.mediaList where file != nil {
        // Not ideal to force unwrap, but I don't know if the where clause supports
        // if-let binding, or if file is really nil; you'll have to check yourself in code.
        self.getImages(with: file!) { image, error in
            guard let newImage = image else { return }
            self.cachedImages.append(newImage)
        }
    }
    self.setImage(at: 0)
}

func getImages(with file: DJIMediaFile, completion: @escaping (UIImage?, Error?) -> ()) {
    // Is this async? If it is, you may want to use dispatch groups in the
    // for-in loop to run setImage at the correct time.
    file.fetchPreview { (error) in
        if let newError = error {
            print(String(describing: newError.localizedDescription))
            completion(nil, error)
            return
        }
        guard let image = file.preview else {
            print("No preview")
            completion(nil, nil)
            return
        }
        completion(image, nil)
    }
}

func setImage(at row: Int) {
    guard row < self.cachedImages.count else { return } // count - 1 would skip the last image
    self.imageView.image = self.cachedImages[row]
}
Your recursion may result in errors you don't want. This is roughly what you want to do, but it should be straightforward to change based on what I can see here.
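Since fetchPreview is asynchronous (as the comment in getImages notes), calling setImage(at: 0) right after the loop will likely run before any previews have arrived. A minimal sketch of the dispatch-group variant, reusing the same getImages(with:completion:) as above:
func getNewImages() {
    let group = DispatchGroup()
    for file in self.mediaList.compactMap({ $0 }) { // skips nil entries without force unwrapping
        group.enter()
        self.getImages(with: file) { image, _ in
            if let newImage = image {
                self.cachedImages.append(newImage)
            }
            group.leave()
        }
    }
    group.notify(queue: .main) {
        self.setImage(at: 0) // runs once every fetchPreview callback has completed
    }
}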
Edit: You may also be intending to replace the displayed image as the images load from the drone itself, in which case the code is more like this:
func getNewImages() {
    for file in self.mediaList where file != nil {
        // Not ideal to force unwrap, but I don't know if the where clause supports
        // if-let binding, or if file is really nil; you'll have to check yourself in code.
        self.getImages(with: file!) { image, error in
            guard let newImage = image else { return }
            self.imageView.image = newImage
        }
    }
}

func getImages(with file: DJIMediaFile, completion: @escaping (UIImage?, Error?) -> ()) {
    file.fetchPreview { (error) in
        if let newError = error {
            print(String(describing: newError.localizedDescription))
            completion(nil, error)
            return
        }
        guard let image = file.preview else {
            print("No preview")
            completion(nil, nil)
            return
        }
        completion(image, nil)
    }
}
Edit 2: Also note, this is not necessarily the best way to do this; the above is simply how you do it in a for loop. I think a better way would be to use map or forEach on the mediaList array itself, like so (same getImages method, nothing changes). This could be further refined with protocols, extensions and whatnot, but without knowing your program it's hard to make better recommendations.
func getNewImages() {
    self.mediaList.forEach { file in
        guard let newFile = file else { return }
        self.getImages(with: newFile) { image, error in
            guard let newImage = image else { return }
            self.imageView.image = newImage
        }
    }
}
Use Case
I'm using the iOS 11 ReplayKit framework to try to record frames from the screen, and audio from both the app and the microphone.
Problem
Randomly, when I call .append(sampleBuffer), I get AVAssetWriterStatus.failed, with the AssetWriter.Error showing:
Error Domain=AVFoundationErrorDomain Code=-11823 "Cannot Save" UserInfo={NSLocalizedRecoverySuggestion=Try saving again., NSLocalizedDescription=Cannot Save, NSUnderlyingError=0x1c044c360 {Error Domain=NSOSStatusErrorDomain Code=-12412 "(null)"}}
Side issue: I play a repeating sound while the app is recording to try to verify the audio is captured, but the sound stops when I start recording, even though the video and external mic audio work.
If you require more info, I can upload the other code to GitHub too.
Ideas
Since the recording sometimes saves (I can export to the Photos app and replay the video), I think it must be an async issue where I'm loading things out of order. Please let me know if you see any!
One idea I will be trying is saving to my own folder inside /Documents instead of directly to /Documents, in case of weird permissions errors. Although I believe this would cause consistent errors, instead of only sometimes breaking.
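For reference, a minimal sketch of that subdirectory idea (the folder name "Recordings" is my own placeholder):
let docs = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let recordings = docs.appendingPathComponent("Recordings", isDirectory: true)
// Creating the directory is a no-op if it already exists
try? FileManager.default.createDirectory(at: recordings, withIntermediateDirectories: true)
videoURL = recordings.appendingPathComponent("\(arc4random()).mp4")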
My Code
func startRecording() {
    guard let firstDocumentDirectoryPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first else { return }
    let directoryContents = try! FileManager.default.contentsOfDirectory(at: URL(fileURLWithPath: firstDocumentDirectoryPath), includingPropertiesForKeys: nil, options: [])
    print(directoryContents)
    videoURL = URL(fileURLWithPath: firstDocumentDirectoryPath.appending("/\(arc4random()).mp4"))
    print(videoURL.absoluteString)
    assetWriter = try! AVAssetWriter(url: videoURL, fileType: AVFileType.mp4)
    let compressionProperties: [String: Any] = [...]
    let videoSettings: [String: Any] = [...]
    let audioSettings: [String: Any] = [...]
    videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
    audioMicInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    audioAppInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    guard let assetWriter = assetWriter else { return }
    guard let videoInput = videoInput else { return }
    guard let audioAppInput = audioAppInput else { return }
    guard let audioMicInput = audioMicInput else { return }
    videoInput.mediaTimeScale = 60
    videoInput.expectsMediaDataInRealTime = true
    audioMicInput.expectsMediaDataInRealTime = true
    audioAppInput.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(videoInput) {
        assetWriter.add(videoInput)
    }
    if assetWriter.canAdd(audioAppInput) {
        assetWriter.add(audioAppInput)
    }
    if assetWriter.canAdd(audioMicInput) {
        assetWriter.add(audioMicInput)
    }
    assetWriter.movieTimeScale = 60
    RPScreenRecorder.shared().startCapture(handler: recordingHandler(sampleBuffer:sampleBufferType:error:)) { (error: Error?) in
        if error != nil {
            print("RPScreenRecorder.shared().startCapture: \(error.debugDescription)")
        } else {
            print("start capture complete")
        }
    }
}
func recordingHandler(sampleBuffer: CMSampleBuffer, sampleBufferType: RPSampleBufferType, error: Error?) {
    if error != nil {
        print("recordingHandler: \(error.debugDescription)")
    }
    if CMSampleBufferDataIsReady(sampleBuffer) {
        guard let assetWriter = assetWriter else { return }
        guard let videoInput = videoInput else { return }
        guard let audioAppInput = audioAppInput else { return }
        guard let audioMicInput = audioMicInput else { return }
        if assetWriter.status == AVAssetWriterStatus.unknown {
            print("AVAssetWriterStatus.unknown")
            if !assetWriter.startWriting() {
                return
            }
            assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        if assetWriter.status == AVAssetWriterStatus.failed {
            print("AVAssetWriterStatus.failed")
            print("assetWriter.error: \(assetWriter.error.debugDescription)")
            return
        }
        if sampleBufferType == RPSampleBufferType.video {
            if videoInput.isReadyForMoreMediaData {
                print("=appending video data")
                videoInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioApp {
            if audioAppInput.isReadyForMoreMediaData {
                print("==appending app audio data")
                audioAppInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioMic {
            if audioMicInput.isReadyForMoreMediaData {
                print("===appending mic audio data")
                audioMicInput.append(sampleBuffer)
            }
        }
    }
}
func stopRecording() {
    RPScreenRecorder.shared().stopCapture { (error) in
        guard let assetWriter = self.assetWriter else { return }
        guard let videoInput = self.videoInput else { return }
        guard let audioAppInput = self.audioAppInput else { return }
        guard let audioMicInput = self.audioMicInput else { return }
        if error != nil {
            print("recordingHandler: \(error.debugDescription)")
        } else {
            videoInput.markAsFinished()
            audioMicInput.markAsFinished()
            audioAppInput.markAsFinished()
            assetWriter.finishWriting(completionHandler: {
                print(self.videoURL)
                self.saveToCameraRoll(URL: self.videoURL)
            })
        }
    }
}
I got it to work. I believe it was indeed an async issue. The problem, for some reason, is that you must make sure
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
happen strictly serially.
Change your code from this:
if assetWriter.status == AVAssetWriterStatus.unknown {
    print("AVAssetWriterStatus.unknown")
    if !assetWriter.startWriting() {
        return
    }
    assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}
to this:
DispatchQueue.main.async { [weak self] in
    guard let self = self else { return }
    if self.assetWriter.status == AVAssetWriterStatus.unknown {
        print("AVAssetWriterStatus.unknown")
        if !self.assetWriter.startWriting() {
            return
        }
        self.assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    }
}
Or even better, dispatch the whole block inside CMSampleBufferDataIsReady, i.e.:
if CMSampleBufferDataIsReady(sampleBuffer) {
    DispatchQueue.main.async { [weak self] in
        ...
    }
}
Let me know if it works!
I had a similar issue. I fixed it by first checking whether a file already exists at videoURL. If so, remove it first and the error will go away.
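A minimal sketch of that check, run before creating the AVAssetWriter (videoURL as in the question's code):
if FileManager.default.fileExists(atPath: videoURL.path) {
    // AVAssetWriter won't write over an existing file
    try? FileManager.default.removeItem(at: videoURL)
}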
I was looking for a way to turn the iPhone's camera flash on and off, and I found this:
@IBAction func didTouchFlashButton(sender: AnyObject) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has torch
    if avDevice.hasTorch {
        // lock your device for configuration
        avDevice.lockForConfiguration(nil)
        // check if your torchMode is on or off. If on turns it off otherwise turns it on
        if avDevice.torchActive {
            avDevice.torchMode = AVCaptureTorchMode.Off
        } else {
            // sets the torch intensity to 100%
            avDevice.setTorchModeOnWithLevel(1.0, error: nil)
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
I get two errors, one on the line:
avDevice.lockForConfiguration(nil)
and the other on the line:
avDevice.setTorchModeOnWithLevel(1.0, error:nil)
Both of them are related to exception handling, but I don't know how to resolve them.
@IBAction func didTouchFlashButton(sender: UIButton) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has torch
    if avDevice.hasTorch {
        // lock your device for configuration
        do {
            try avDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        // check if your torchMode is on or off. If on turns it off otherwise turns it on
        if avDevice.torchActive {
            avDevice.torchMode = AVCaptureTorchMode.Off
        } else {
            // sets the torch intensity to 100%
            do {
                try avDevice.setTorchModeOnWithLevel(1.0)
            } catch {
                print("bbb")
            }
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
Swift 4 version, adapted from Ivan Slavov's answer. "TorchMode.auto" is also an option if you want to get fancy.
@IBAction func didTouchFlashButton(_ sender: Any) {
    if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
        if avDevice.hasTorch {
            do {
                try avDevice.lockForConfiguration()
            } catch {
                print("aaaa")
            }
            if avDevice.isTorchActive {
                avDevice.torchMode = AVCaptureDevice.TorchMode.off
            } else {
                avDevice.torchMode = AVCaptureDevice.TorchMode.on
            }
            // unlock your device
            avDevice.unlockForConfiguration()
        }
    }
}
Swift 5.4 & Xcode 12.4 & iOS 14.4.2:
@objc private func flashEnableButtonAction() {
    guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
        return
    }
    if captureDevice.hasTorch {
        do {
            try captureDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        if captureDevice.isTorchActive {
            captureDevice.torchMode = AVCaptureDevice.TorchMode.off
        } else {
            do {
                try captureDevice.setTorchModeOn(level: 1.0)
            } catch {
                print("bbb")
            }
        }
        captureDevice.unlockForConfiguration()
    }
}
For some reason, avDevice.torchActive is always false, even when the torch is on, making it impossible to turn off. But I fixed it by declaring a boolean, initially set to false; every time the flash turns on, the boolean is set to true.
var on: Bool = false

@IBAction func didTouchFlashButton(sender: UIButton) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has torch
    if avDevice.hasTorch {
        // lock your device for configuration
        do {
            try avDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        // check if your torchMode is on or off. If on turns it off otherwise turns it on
        if on == true {
            avDevice.torchMode = AVCaptureTorchMode.Off
            on = false
        } else {
            // sets the torch intensity to 100%
            do {
                try avDevice.setTorchModeOnWithLevel(1.0)
                on = true
            } catch {
                print("bbb")
            }
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
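An alternative to tracking the state manually (a sketch resting on the assumption that torchMode, unlike torchActive above, reliably reflects the last value you set; worth verifying on a device):
// Inside the locked configuration block, toggle based on torchMode instead of a flag
if avDevice.torchMode == AVCaptureTorchMode.On {
    avDevice.torchMode = AVCaptureTorchMode.Off
} else {
    do {
        try avDevice.setTorchModeOnWithLevel(1.0)
    } catch {
        print("bbb")
    }
}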
import AVFoundation
var videoDeviceInput: AVCaptureDeviceInput?
var movieFileOutput: AVCaptureMovieFileOutput?
var stillImageOutput: AVCaptureStillImageOutput?
Add a class method to ViewController.
class func setFlashMode(flashMode: AVCaptureFlashMode, device: AVCaptureDevice) {
    if device.hasFlash && device.isFlashModeSupported(flashMode) {
        var error: NSError? = nil
        do {
            try device.lockForConfiguration()
            device.flashMode = flashMode
            device.unlockForConfiguration()
        } catch let error1 as NSError {
            error = error1
            print(error)
        }
    }
}
Check the flash mode status:
// Flash set to Auto/Off for Still Capture
print("flashMode.rawValue : \(self.videoDeviceInput!.device.flashMode.rawValue)")
if self.videoDeviceInput!.device.flashMode.rawValue == 1 {
    CameraViewController.setFlashMode(AVCaptureFlashMode.On, device: self.videoDeviceInput!.device)
} else if self.videoDeviceInput!.device.flashMode.rawValue == 2 {
    CameraViewController.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)
} else {
    CameraViewController.setFlashMode(AVCaptureFlashMode.Off, device: self.videoDeviceInput!.device)
}
Another short way is to do this:
let devices = AVCaptureDevice.devices()
let device = devices[0]

guard device.isTorchAvailable else { return }

do {
    try device.lockForConfiguration()
    if device.torchMode == .on {
        device.torchMode = .off
    } else {
        device.torchMode = .on
    }
    device.unlockForConfiguration() // balance the lock taken above
} catch {
    debugPrint(error)
}