Playing back audio recorded with AVCaptureAudioDataOutput - iOS

I'm trying to record video and audio and send them over the network so they can be played back in real time on other clients. I've managed to record and play back video successfully, but audio still cannot be played back (see the AVAudioPlayer at the bottom of the code below). What am I doing wrong, or what is missing? (A couple of other StackOverflow questions seem to address the same issue, and although the comments there show that some people got it working, none of them give an explicit, working answer.) Thank you in advance for any input.
let captureSession = AVCaptureSession()
private func startVideoFeed() {
let sessionPreset = AVCaptureSession.Preset.low
if captureSession.canSetSessionPreset(sessionPreset) {
captureSession.sessionPreset = sessionPreset
}
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { success in
self.startVideoFeed()
}
case .authorized:
captureSession.beginConfiguration()
let captureVideoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)!
let captureVideoInput = try! AVCaptureDeviceInput(device: captureVideoDevice)
if captureSession.canAddInput(captureVideoInput) {
captureSession.addInput(captureVideoInput)
}
let captureVideoOutput = AVCaptureVideoDataOutput()
captureVideoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(captureVideoOutput) {
captureSession.addOutput(captureVideoOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
default:
break
}
}
private func startAudioFeed() {
switch AVCaptureDevice.authorizationStatus(for: .audio) {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .audio) { success in
self.startAudioFeed()
}
case .authorized:
captureSession.beginConfiguration()
let captureAudioDevice = AVCaptureDevice.default(for: .audio)!
let captureAudioInput = try! AVCaptureDeviceInput(device: captureAudioDevice)
if captureSession.canAddInput(captureAudioInput) {
captureSession.addInput(captureAudioInput)
}
let captureAudioOutput = AVCaptureAudioDataOutput()
captureAudioOutput.audioSettings = [AVFormatIDKey: kAudioFormatLinearPCM, AVNumberOfChannelsKey: NSNumber(value: 1), AVSampleRateKey: NSNumber(value: 44100)]
captureAudioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(captureAudioOutput) {
captureSession.addOutput(captureAudioOutput)
}
captureSession.commitConfiguration()
default:
break
}
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if let imageBuffer = sampleBuffer.imageBuffer {
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent)!
let data = CFDataCreateMutable(nil, 0)!
let imageDestination = CGImageDestinationCreateWithData(data, kUTTypeJPEG, 1, nil)!
CGImageDestinationAddImage(imageDestination, cgImage, [kCGImageDestinationLossyCompressionQuality: NSNumber(value: 0)] as CFDictionary)
CGImageDestinationFinalize(imageDestination)
play(data: data as Data)
} else if let dataBuffer = sampleBuffer.dataBuffer {
let data = try! dataBuffer.dataBytes()
play(data: data)
}
}
private func play(data: Data) {
if let image = CGImage(jpegDataProviderSource: CGDataProvider(data: data as CFData)!, decode: nil, shouldInterpolate: false, intent: .defaultIntent) {
// image is a valid image
} else if let audioPlayer = try? AVAudioPlayer(data: data) {
audioPlayer.play()
// audioPlayer is always nil with error: Error Domain=NSOSStatusErrorDomain Code=1954115647 "(null)"
}
}

From the CMSampleBufferGetDataBuffer documentation (https://developer.apple.com/documentation/coremedia/1489629-cmsamplebuffergetdatabuffer):
"The caller does not own the returned dataBuffer, and must retain it explicitly if the caller needs to maintain a reference to it."
So you will need to CFRetain and CFRelease the buffer if you hold on to it.
The player is nil because of the data you are initializing it with. AVAudioPlayer expects a complete audio file, while dataBytes() gives you headerless linear PCM: the error NSOSStatusErrorDomain Code=1954115647 is the FourCC 'typ?', i.e. kAudioFileUnsupportedFileTypeError.
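A minimal sketch of playing that headerless PCM with AVAudioEngine/AVAudioPlayerNode instead of AVAudioPlayer. It assumes non-interleaved Float32, mono, 44.1 kHz; the capture settings above only pin the format ID, channel count and sample rate, so you would likely also set AVLinearPCMIsFloatKey/AVLinearPCMBitDepthKey (or run the data through an AVAudioConverter) so the bytes actually match:
import AVFoundation

// Sketch only: schedule raw LPCM Data on an AVAudioPlayerNode.
final class PCMStreamPlayer {
    private let engine = AVAudioEngine()
    private let node = AVAudioPlayerNode()
    // Assumed format: Float32, non-interleaved, 1 channel, 44100 Hz.
    private let format = AVAudioFormat(standardFormatWithSampleRate: 44100, channels: 1)!

    init() throws {
        engine.attach(node)
        engine.connect(node, to: engine.mainMixerNode, format: format)
        try engine.start()
        node.play()
    }

    func play(data: Data) {
        let bytesPerFrame = Int(format.streamDescription.pointee.mBytesPerFrame)
        let frameCount = AVAudioFrameCount(data.count / bytesPerFrame)
        guard frameCount > 0,
              let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCount) else { return }
        buffer.frameLength = frameCount
        // Copy the raw samples into the buffer's single channel.
        data.withUnsafeBytes { raw in
            guard let base = raw.baseAddress else { return }
            memcpy(buffer.floatChannelData![0], base, Int(frameCount) * bytesPerFrame)
        }
        node.scheduleBuffer(buffer, completionHandler: nil)
    }
}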

Related

Play & Mix Background Audio with Video Camera Recording

I'm currently debugging the video camera model that I use to record video and audio. I would like the camera to keep playing background audio if something is already playing, while recording with the mic over that audio. I initially got my AVCaptureSession working as intended by adding the microphone input during setup, but that automatically stops any background audio as soon as the camera view is set up.
I have been working on the following solution, where I add the audio input only when I start recording and attempt to remove it once I stop recording. Here is my current code:
import SwiftUI
import AVFoundation
// MARK: Camera View Model
class CameraViewModel: NSObject,ObservableObject,AVCaptureFileOutputRecordingDelegate, AVCapturePhotoCaptureDelegate{
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var output = AVCaptureMovieFileOutput()
@Published var preview : AVCaptureVideoPreviewLayer!
// MARK: Video Recorder Properties
@Published var isRecording: Bool = false
@Published var recordedURLs: [URL] = []
@Published var previewURL: URL?
@Published var showPreview: Bool = false
// Set up is called after necessary permissions are acquired
func setUp(){
do{
self.session.beginConfiguration()
let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
if cameraDevice != nil {
let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
/* old code that added audio input on open that worked as intended
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){ //MARK: Audio Input
self.session.addInput(videoInput)
self.session.addInput(audioInput)
self.videoDeviceInput = videoInput
} */
// new code that only adds video input
if self.session.canAddInput(videoInput) {
self.session.addInput(videoInput)
self.videoDeviceInput = videoInput
}
if self.session.canAddOutput(self.output){
self.session.addOutput(self.output)
}
if self.session.canAddOutput(self.photoOutput){
self.session.addOutput(self.photoOutput)
}
//for audio mixing, make sure this is default set to true
self.session.automaticallyConfiguresApplicationAudioSession = true
self.session.commitConfiguration()
}
}
catch{
print(error.localizedDescription)
}
}
//start recording is called upon a user input which now attaches the mic input
func startRecording() {
// here is how I'm mixing the background audio and adding the microphone input when the camera starts recording
do
{
try AVAudioSession.sharedInstance().setActive(false)
try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.ambient)
try AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default, options: AVAudioSession.CategoryOptions.mixWithOthers)
try AVAudioSession.sharedInstance().setMode(AVAudioSession.Mode.videoRecording)
try AVAudioSession.sharedInstance().setActive(true)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
if self.session.canAddInput(audioInput){
self.session.automaticallyConfiguresApplicationAudioSession = false
self.session.addInput(audioInput)
}
} catch {
print("Can't Set Audio Session Category: \(error)")
}
// MARK: Temporary URL for recording Video
let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
//Need to correct image orientation before moving further
if let videoOutputConnection = output.connection(with: .video) {
//For frontCamera settings to capture mirror image
if self.videoDeviceInput.device.position == .front {
videoOutputConnection.automaticallyAdjustsVideoMirroring = false
videoOutputConnection.isVideoMirrored = true
} else {
videoOutputConnection.automaticallyAdjustsVideoMirroring = true
}
}
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
isRecording = true
}
//stop recording removes the audio input
func stopRecording(){
output.stopRecording()
isRecording = false
self.flashOn = false
// stop recording is where I believe I'm doing something wrong when I remove the audio input
do{
try AVAudioSession.sharedInstance().setActive(false)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
self.session.removeInput(audioInput)
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.ambient, mode: .default, options: [.mixWithOthers])
try AVAudioSession.sharedInstance().setActive(true)
} catch {
print("Error occurred while removing audio device input: \(error)")
}
}
}
I also added the following lines to my AppDelegate launch method:
/*
below is for mixing audio
*/
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(.ambient, mode: .default, options: [.mixWithOthers])
} catch {
print("Failed to set audio session category.")
}
I believe I'm on the right track: the first time the app opens, background audio keeps playing smoothly (there is only a small camera flash), and once I start recording it mixes the background audio with the phone's mic input as intended. I was able to confirm this in the preview shown in a new view. However, once I dismiss the preview of the recorded URL and go back to the camera, the mic input stops working completely.
I also receive this error in my console:
AVAudioSession_iOS.mm:1271 Deactivating an audio session that has running I/O. All I/O should be stopped or paused prior to deactivating the audio session.
When I looked online, the advice was to stop or pause the AVPlayer, but I'm not sure where I'm even using an AVPlayer here. I also saw suggestions to create two separate capture sessions for audio and video, but I struggled to get that working as well, so I went with this approach instead.
Edit: adding a minimal reproducible example.
Here is the camera view model; I've attached the necessary views in a separate answer:
import SwiftUI
import AVFoundation
// MARK: Camera View Model
class CameraViewModel: NSObject,ObservableObject,AVCaptureFileOutputRecordingDelegate, AVCapturePhotoCaptureDelegate{
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var output = AVCaptureMovieFileOutput()
@Published var preview : AVCaptureVideoPreviewLayer!
// MARK: Video Recorder Properties
@Published var isRecording: Bool = false
@Published var recordedURLs: [URL] = []
@Published var previewURL: URL?
@Published var showPreview: Bool = false
// Top Progress Bar
@Published var recordedDuration: CGFloat = 0
// Maximum 15 seconds
@Published var maxDuration: CGFloat = 15
//for photo
// since we're going to read pic data....
@Published var photoOutput = AVCapturePhotoOutput()
@Published var isTaken = false
@Published var picData = Data(count: 0)
@Published var thumbnailData = Data(count: 0)
@Published var flashOn = false
@objc dynamic var videoDeviceInput: AVCaptureDeviceInput!
private let sessionQueue = DispatchQueue(label: "session queue")
// MARK: Device Configuration Properties
private let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera], mediaType: .video, position: .unspecified)
@AppStorage("camerapermission") var camerapermission = 0
func checkPermission(){
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
self.checkAudioPermission()
return
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status{
self.checkAudioPermission()
}
}
case .denied:
self.camerapermission = 2
self.alert.toggle()
return
default:
return
}
}
func checkAudioPermission() {
switch AVAudioSession.sharedInstance().recordPermission {
case .granted :
print("permission granted")
self.camerapermission = 1
setUp()
case .denied:
print("permission denied")
self.camerapermission = 2
self.alert.toggle()
case .undetermined:
print("request permission here")
AVAudioSession.sharedInstance().requestRecordPermission({ granted in
if granted {
print("permission granted here")
DispatchQueue.main.async {
self.camerapermission = 1
}
self.setUp()
}
})
default:
print("unknown")
}
}
func setUp(){
do{
self.session.beginConfiguration()
let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
if cameraDevice != nil {
let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
// let audioDevice = AVCaptureDevice.default(for: .audio)
// let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
// if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){ //MARK: Audio Input
// self.session.addInput(videoInput)
// self.session.addInput(audioInput)
// self.videoDeviceInput = videoInput
// }
//
/* mixing code buggy */
if self.session.canAddInput(videoInput) {
self.session.addInput(videoInput)
self.videoDeviceInput = videoInput
}
if self.session.canAddOutput(self.output){
self.session.addOutput(self.output)
}
if self.session.canAddOutput(self.photoOutput){
self.session.addOutput(self.photoOutput)
}
//for audio mixing, make sure this is default set to true
self.session.automaticallyConfiguresApplicationAudioSession = true
self.session.commitConfiguration()
}
}
catch{
print(error.localizedDescription)
}
}
public func set(zoom: CGFloat){
let factor = zoom < 1 ? 1 : zoom
let device = self.videoDeviceInput.device
do {
try device.lockForConfiguration()
device.videoZoomFactor = factor
device.unlockForConfiguration()
}
catch {
print(error.localizedDescription)
}
}
func changeCamera() {
sessionQueue.async {
if self.videoDeviceInput != nil {
let currentVideoDevice = self.videoDeviceInput.device
let currentPosition = currentVideoDevice.position
let preferredPosition: AVCaptureDevice.Position
let preferredDeviceType: AVCaptureDevice.DeviceType
switch currentPosition {
case .unspecified, .front:
preferredPosition = .back
preferredDeviceType = .builtInWideAngleCamera
case .back:
preferredPosition = .front
preferredDeviceType = .builtInWideAngleCamera
@unknown default:
print("Unknown capture position. Defaulting to back, dual-camera.")
preferredPosition = .back
preferredDeviceType = .builtInWideAngleCamera
}
let devices = self.videoDeviceDiscoverySession.devices
var newVideoDevice: AVCaptureDevice? = nil
// First, seek a device with both the preferred position and device type. Otherwise, seek a device with only the preferred position.
if let device = devices.first(where: { $0.position == preferredPosition && $0.deviceType == preferredDeviceType }) {
newVideoDevice = device
} else if let device = devices.first(where: { $0.position == preferredPosition }) {
newVideoDevice = device
}
if let videoDevice = newVideoDevice {
do {
let videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
self.session.beginConfiguration()
// Remove the existing device input first, because AVCaptureSession doesn't support
// simultaneous use of the rear and front cameras.
self.session.removeInput(self.videoDeviceInput)
// MARK: Audio Input
if self.session.canAddInput(videoDeviceInput){
self.session.addInput(videoDeviceInput)
self.videoDeviceInput = videoDeviceInput
}
if self.session.canAddOutput(self.output){
self.session.addOutput(self.output)
}
if self.session.canAddOutput(self.photoOutput){
self.session.addOutput(self.photoOutput)
}
self.session.commitConfiguration()
} catch {
print("Error occurred while creating video device input: \(error)")
}
}
}
}
}
// take and retake functions...
func switchFlash() {
self.flashOn.toggle()
}
func takePic(){
let settings = AVCapturePhotoSettings()
if flashOn {
settings.flashMode = .on
} else {
settings.flashMode = .off
}
//Need to correct image orientation before moving further
if let photoOutputConnection = photoOutput.connection(with: .video) {
//For frontCamera settings to capture mirror image
if self.videoDeviceInput.device.position == .front {
photoOutputConnection.automaticallyAdjustsVideoMirroring = false
photoOutputConnection.isVideoMirrored = true
} else {
photoOutputConnection.automaticallyAdjustsVideoMirroring = true
}
}
self.photoOutput.capturePhoto(with: settings, delegate: self)
print("retaking a photo taken...")
DispatchQueue.global(qos: .background).async {
//self.session.stopRunning()
DispatchQueue.main.async {
withAnimation{self.isTaken.toggle()}
}
}
}
func reTake(){
DispatchQueue.global(qos: .background).async {
self.session.startRunning()
DispatchQueue.main.async {
withAnimation{self.isTaken.toggle()}
//clearing ...
self.flashOn = false
self.picData = Data(count: 0)
}
}
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if error != nil{
return
}
print("pic taken...")
guard let imageData = photo.fileDataRepresentation() else{return}
self.picData = imageData
}
func startRecording() {
/* mixing code buggy */
do
{
try AVAudioSession.sharedInstance().setActive(false)
try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.ambient)
try AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default, options: AVAudioSession.CategoryOptions.mixWithOthers)
try AVAudioSession.sharedInstance().setMode(AVAudioSession.Mode.videoRecording)
try AVAudioSession.sharedInstance().setActive(true)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
if self.session.canAddInput(audioInput){
self.session.automaticallyConfiguresApplicationAudioSession = false
self.session.addInput(audioInput)
}
} catch {
print("Can't Set Audio Session Category: \(error)")
}
// MARK: Temporary URL for recording Video
let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
//Need to correct image orientation before moving further
if let videoOutputConnection = output.connection(with: .video) {
//For frontCamera settings to capture mirror image
if self.videoDeviceInput.device.position == .front {
videoOutputConnection.automaticallyAdjustsVideoMirroring = false
videoOutputConnection.isVideoMirrored = true
} else {
videoOutputConnection.automaticallyAdjustsVideoMirroring = true
}
}
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
isRecording = true
}
func stopRecording(){
output.stopRecording()
isRecording = false
self.flashOn = false
/* mixing code buggy */
do{
try AVAudioSession.sharedInstance().setActive(false)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
self.session.removeInput(audioInput)
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.ambient, mode: .default, options: [.mixWithOthers])
try AVAudioSession.sharedInstance().setActive(true)
} catch {
print("Error occurred while removing audio device input: \(error)")
}
}
func generateThumbnail() {
let image = self.imageFromVideo(url: previewURL!, at: 0)
DispatchQueue.main.async {
self.thumbnailData = image?.pngData() ?? Data(count: 0)
}
}
func imageFromVideo(url: URL, at time: TimeInterval) -> UIImage? {
let asset = AVURLAsset(url: url)
let assetIG = AVAssetImageGenerator(asset: asset)
assetIG.appliesPreferredTrackTransform = true
assetIG.apertureMode = AVAssetImageGenerator.ApertureMode.encodedPixels
let cmTime = CMTime(seconds: time, preferredTimescale: 60)
let thumbnailImageRef: CGImage
do {
thumbnailImageRef = try assetIG.copyCGImage(at: cmTime, actualTime: nil)
print("SUCESS: THUMBNAIL")
} catch let error {
print("Error: \(error)")
return nil
}
return UIImage(cgImage: thumbnailImageRef)
}
func restartSession() {
if !self.session.isRunning {
DispatchQueue.global(qos: .background).async {
self.session.startRunning()
}
}
}
func stopSession() {
// DispatchQueue.global(qos: .background).async {
self.session.stopRunning()
// }
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
if let error = error {
print(error.localizedDescription)
return
}
// CREATED SUCCESSFULLY
print(outputFileURL)
guard let data = try? Data(contentsOf: outputFileURL) else {
return
}
print("File size before compression: \(Double(data.count / 1048576)) mb")
self.recordedURLs.append(outputFileURL)
if self.recordedURLs.count == 1{
self.previewURL = outputFileURL
self.generateThumbnail()
return
}
/*
Below code can be ignored because only recording one url
*/
// CONVERTING URLs TO ASSETS
let assets = recordedURLs.compactMap { url -> AVURLAsset in
return AVURLAsset(url: url)
}
self.previewURL = nil
// MERGING VIDEOS
mergeVideos(assets: assets) { exporter in
exporter.exportAsynchronously {
if exporter.status == .failed{
// HANDLE ERROR
print(exporter.error!)
}
else{
if let finalURL = exporter.outputURL{
print(finalURL)
DispatchQueue.main.async {
self.previewURL = finalURL
print("inside final url")
}
}
}
}
}
}
func mergeVideos(assets: [AVURLAsset], completion: @escaping (_ exporter: AVAssetExportSession)->()){
let composition = AVMutableComposition()
var lastTime: CMTime = .zero
guard let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
guard let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
for asset in assets {
// Linking Audio and Video
do{
try videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .video)[0], at: lastTime)
// Safe Check if Video has Audio
if !asset.tracks(withMediaType: .audio).isEmpty{
try audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration), of: asset.tracks(withMediaType: .audio)[0], at: lastTime)
}
}
catch{
// HANDLE ERROR
print(error.localizedDescription)
}
// Updating Last Time
lastTime = CMTimeAdd(lastTime, asset.duration)
}
// MARK: Temp Output URL
let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "Reel-\(Date()).mp4")
// VIDEO IS ROTATED
// BRINGING BACK TO ORIGINAL TRANSFORM
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
// MARK: Transform
var transform = CGAffineTransform.identity
transform = transform.rotated(by: 90 * (.pi / 180))
transform = transform.translatedBy(x: 0, y: -videoTrack.naturalSize.height)
layerInstructions.setTransform(transform, at: .zero)
let instructions = AVMutableVideoCompositionInstruction()
instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
instructions.layerInstructions = [layerInstructions]
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
videoComposition.instructions = [instructions]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else{return}
exporter.outputFileType = .mp4
exporter.outputURL = tempURL
exporter.videoComposition = videoComposition
completion(exporter)
}
func compressVideo(inputURL: URL,
outputURL: URL,
handler: @escaping (_ exportSession: AVAssetExportSession?) -> Void) {
let urlAsset = AVURLAsset(url: inputURL, options: nil)
guard let exportSession = AVAssetExportSession(asset: urlAsset,
presetName: AVAssetExportPresetMediumQuality) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mp4
exportSession.exportAsynchronously {
handler(exportSession)
}
}
}
You can ignore the merge videos code as there is only one recorded url but right now, you should be able to run this code if you've added camera and microphone permissions to your info.plist.
It currently has the buggy mixing code where background audio does work the first time but after restarting audio session, it no longer works. Any help would be greatly appreciated!
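Not a full answer, but one thing worth double-checking in the stopRecording code above: it builds a brand-new AVCaptureDeviceInput and passes that to removeInput, rather than the input instance that was actually added in startRecording. A minimal sketch (assuming the same session property) of removing whichever microphone input is currently attached:
// Sketch: remove the attached mic input by scanning session.inputs instead of
// constructing a fresh AVCaptureDeviceInput that was never added to the session.
func removeAudioInput() {
    session.beginConfiguration()
    for input in session.inputs {
        if let deviceInput = input as? AVCaptureDeviceInput, deviceInput.device.hasMediaType(.audio) {
            session.removeInput(deviceInput)
        }
    }
    session.commitConfiguration()
}
The "Deactivating an audio session that has running I/O" message likewise suggests removing the mic input (or stopping the capture session) before calling setActive(false).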

How do you create a new AVAsset video that consists of only frames from given `CMTimeRange`s of another video?

Apple's sample code Identifying Trajectories in Video contains the following delegate callback:
func cameraViewController(_ controller: CameraViewController, didReceiveBuffer buffer: CMSampleBuffer, orientation: CGImagePropertyOrientation) {
let visionHandler = VNImageRequestHandler(cmSampleBuffer: buffer, orientation: orientation, options: [:])
if gameManager.stateMachine.currentState is GameManager.TrackThrowsState {
DispatchQueue.main.async {
// Get the frame of rendered view
let normalizedFrame = CGRect(x: 0, y: 0, width: 1, height: 1)
self.jointSegmentView.frame = controller.viewRectForVisionRect(normalizedFrame)
self.trajectoryView.frame = controller.viewRectForVisionRect(normalizedFrame)
}
// Perform the trajectory request in a separate dispatch queue.
trajectoryQueue.async {
do {
try visionHandler.perform([self.detectTrajectoryRequest])
if let results = self.detectTrajectoryRequest.results {
DispatchQueue.main.async {
self.processTrajectoryObservations(controller, results)
}
}
} catch {
AppError.display(error, inViewController: self)
}
}
}
}
However, instead of drawing UI whenever detectTrajectoryRequest.results exist (https://developer.apple.com/documentation/vision/vndetecttrajectoriesrequest/3675672-results), I'm interested in using the CMTimeRange provided by each result to construct a new video. In effect, this would filter down the original video to only frames with trajectories.
What would be a good approach to transferring only frames with trajectories from an AVAssetReader to an AVAssetWriter?
By the time you identify a trajectory in captured video frames (or in frames decoded from a file), you may no longer have the initial frames in memory, so the easiest way to create a file containing only the trajectories is to keep the original file on hand and then insert its trajectory snippets into an AVComposition, which you then export using AVAssetExportSession.
This sample captures frames from the camera and encodes them to a file while analysing them for trajectories; after 20 seconds it closes the file and then creates a new file containing only the trajectory snippets.
If you're interested in detecting trajectories in a pre-existing file, it's not too hard to rewire this code.
import UIKit
import AVFoundation
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let session = AVCaptureSession()
var assetWriter: AVAssetWriter!
var assetWriterInput: AVAssetWriterInput!
var assetWriterStartTime: CMTime = .zero
var assetWriterStarted = false
var referenceFileURL: URL!
var timeRangesOfInterest: [Double : CMTimeRange] = [:]
func startWritingFile(outputURL: URL, initialSampleBuffer: CMSampleBuffer) {
try? FileManager.default.removeItem(at: outputURL)
assetWriter = try! AVAssetWriter(outputURL: outputURL, fileType: .mov)
let dimensions = initialSampleBuffer.formatDescription!.dimensions
assetWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: [AVVideoCodecKey: AVVideoCodecType.h264, AVVideoWidthKey: dimensions.width, AVVideoHeightKey: dimensions.height])
assetWriter.add(assetWriterInput)
assetWriter.startWriting()
self.assetWriterStartTime = CMSampleBufferGetPresentationTimeStamp(initialSampleBuffer)
assetWriter.startSession(atSourceTime: self.assetWriterStartTime)
}
func stopWritingFile(completion: @escaping (() -> Void)) {
let assetWriterToFinish = self.assetWriter!
self.assetWriterInput = nil
self.assetWriter = nil
assetWriterToFinish.finishWriting {
print("finished writing: \(assetWriterToFinish.status.rawValue)")
completion()
}
}
func exportVideoTimeRanges(inputFileURL: URL, outputFileURL: URL, timeRanges: [CMTimeRange]) {
let inputAsset = AVURLAsset(url: inputFileURL)
let inputVideoTrack = inputAsset.tracks(withMediaType: .video).first!
let composition = AVMutableComposition()
let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)!
var insertionPoint: CMTime = .zero
for timeRange in timeRanges {
try! compositionTrack.insertTimeRange(timeRange, of: inputVideoTrack, at: insertionPoint)
insertionPoint = insertionPoint + timeRange.duration
}
let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
try? FileManager.default.removeItem(at: outputFileURL)
exportSession.outputURL = outputFileURL
exportSession.outputFileType = .mov
exportSession.exportAsynchronously {
print("export finished: \(exportSession.status.rawValue) - \(exportSession.error)")
}
}
override func viewDidLoad() {
super.viewDidLoad()
let inputDevice = AVCaptureDevice.default(for: .video)!
let input = try! AVCaptureDeviceInput(device: inputDevice)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session.addInput(input)
session.addOutput(output)
session.startRunning()
DispatchQueue.main.asyncAfter(deadline: .now() + 20) {
self.stopWritingFile {
print("finished writing")
let trajectoriesFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("trajectories.mov")
self.exportVideoTimeRanges(inputFileURL: self.referenceFileURL, outputFileURL: trajectoriesFileURL, timeRanges: self.timeRangesOfInterest.map { $0.1 })
}
}
}
// Lazily create a single instance of VNDetectTrajectoriesRequest.
private lazy var request: VNDetectTrajectoriesRequest = {
return VNDetectTrajectoriesRequest(frameAnalysisSpacing: .zero,
trajectoryLength: 10,
completionHandler: completionHandler)
}()
// AVCaptureVideoDataOutputSampleBufferDelegate callback.
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
if !assetWriterStarted {
self.referenceFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("reference.mov")
startWritingFile(outputURL: self.referenceFileURL, initialSampleBuffer: sampleBuffer)
assetWriterStarted = true
}
if assetWriterInput != nil && assetWriterInput.isReadyForMoreMediaData {
assetWriterInput.append(sampleBuffer)
}
do {
let requestHandler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer)
try requestHandler.perform([request])
} catch {
// Handle the error.
}
}
func completionHandler(request: VNRequest, error: Error?) {
guard let request = request as? VNDetectTrajectoriesRequest else { return }
if let results = request.results,
results.count > 0 {
NSLog("\(results)")
for result in results {
var fileRelativeTimeRange = result.timeRange
fileRelativeTimeRange.start = fileRelativeTimeRange.start - self.assetWriterStartTime
self.timeRangesOfInterest[fileRelativeTimeRange.start.seconds] = fileRelativeTimeRange
}
}
}
}

Remove AVAssetWriter's First Black/Blank Frame

I have an avassetwriter to record a video with an applied filter to then play back via avqueueplayer.
My issue is, on play back, the recorded video displays a black/blank screen for the first frame. To my understanding, this is due to the writer capturing audio before capturing the first actual video frame.
To attempt to resolve this, I added a boolean check so that audio is only appended to the audio writer input after the first video frame has been appended to the adapter. That said, I still saw a black frame on playback, even though the printed timestamps showed video preceding audio... I also tried adding a check to only start the write session when output == video, but ended up with the same result.
Any guidance or other workaround would be appreciated.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
if output == _videoOutput {
if connection.isVideoOrientationSupported { connection.videoOrientation = .portrait }
guard let cvImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let ciImage = CIImage(cvImageBuffer: cvImageBuffer)
guard let filteredCIImage = applyFilters(inputImage: ciImage) else { return }
self.ciImage = filteredCIImage
guard let cvPixelBuffer = getCVPixelBuffer(from: filteredCIImage) else { return }
self.cvPixelBuffer = cvPixelBuffer
self.ciContext.render(filteredCIImage, to: cvPixelBuffer, bounds: filteredCIImage.extent, colorSpace: CGColorSpaceCreateDeviceRGB())
metalView.draw()
}
switch _captureState {
case .start:
guard let outputUrl = tempURL else { return }
let writer = try! AVAssetWriter(outputURL: outputUrl, fileType: .mp4)
let videoSettings = _videoOutput!.recommendedVideoSettingsForAssetWriter(writingTo: .mp4)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let pixelBufferAttributes = [
kCVPixelBufferCGImageCompatibilityKey: NSNumber(value: true),
kCVPixelBufferCGBitmapContextCompatibilityKey: NSNumber(value: true),
kCVPixelBufferPixelFormatTypeKey: NSNumber(value: Int32(kCVPixelFormatType_32ARGB))
] as [String:Any]
let adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: pixelBufferAttributes)
if writer.canAdd(videoInput) { writer.add(videoInput) }
let audioSettings = _audioOutput!.recommendedAudioSettingsForAssetWriter(writingTo: .mp4) as? [String:Any]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioInput.expectsMediaDataInRealTime = true
if writer.canAdd(audioInput) { writer.add(audioInput) }
_filename = outputUrl.absoluteString
_assetWriter = writer
_assetWriterVideoInput = videoInput
_assetWriterAudioInput = audioInput
_adapter = adapter
_captureState = .capturing
_time = timestamp
writer.startWriting()
writer.startSession(atSourceTime: CMTime(seconds: timestamp, preferredTimescale: CMTimeScale(600)))
case .capturing:
if output == _videoOutput {
if _assetWriterVideoInput?.isReadyForMoreMediaData == true {
let time = CMTime(seconds: timestamp, preferredTimescale: CMTimeScale(600))
_adapter?.append(self.cvPixelBuffer, withPresentationTime: time)
if !hasWrittenFirstVideoFrame { hasWrittenFirstVideoFrame = true }
}
} else if output == _audioOutput {
if _assetWriterAudioInput?.isReadyForMoreMediaData == true, hasWrittenFirstVideoFrame {
_assetWriterAudioInput?.append(sampleBuffer)
}
}
break
case .end:
guard _assetWriterVideoInput?.isReadyForMoreMediaData == true, _assetWriter!.status != .failed else { break }
_assetWriterVideoInput?.markAsFinished()
_assetWriterAudioInput?.markAsFinished()
_assetWriter?.finishWriting { [weak self] in
guard let output = self?._assetWriter?.outputURL else { return }
self?._captureState = .idle
self?._assetWriter = nil
self?._assetWriterVideoInput = nil
self?._assetWriterAudioInput = nil
self?.previewRecordedVideo(with: output)
}
default:
break
}
}
It's true that in the .capturing state you make sure the first sample buffer written is a video sample buffer by discarding preceding audio sample buffers; however, you are still allowing an audio sample buffer's presentation timestamp to start the timeline via writer.startSession(atSourceTime:). This means your video starts with nothing: not only do you briefly hear nothing (which is hard to notice), you also see nothing, which your video player happens to represent with a black frame.
From this point of view, there are no black frames to remove, there is only a void to fill. You can fill this void by starting the session from the first video timestamp.
This can be achieved by guarding against non-video sample buffers in the .start state, or, less cleanly, by moving writer.startSession(atSourceTime:) into the if !hasWrittenFirstVideoFrame {} block.
p.s. why do you convert back and forth between CMTime and seconds? Why not stick with CMTime?
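A rough sketch of the first option (illustrative names, not the asker's properties): call startWriting() up front, but defer startSession(atSourceTime:) until the first video buffer and drop any audio that arrives before it. The same gating applies if video frames are appended through a pixel buffer adaptor, as in the question.
import AVFoundation

// Sketch of "start the timeline at the first video frame": writing starts immediately,
// but startSession(atSourceTime:) waits for the first video buffer; any audio that
// arrives before that is dropped.
final class DeferredStartWriter {
    private let writer: AVAssetWriter
    private let videoInput: AVAssetWriterInput
    private let audioInput: AVAssetWriterInput
    private var sessionStarted = false

    init(outputURL: URL, videoSettings: [String: Any], audioSettings: [String: Any]) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
        videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
        audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
        videoInput.expectsMediaDataInRealTime = true
        audioInput.expectsMediaDataInRealTime = true
        if writer.canAdd(videoInput) { writer.add(videoInput) }
        if writer.canAdd(audioInput) { writer.add(audioInput) }
        writer.startWriting()
    }

    func append(_ sampleBuffer: CMSampleBuffer, isVideo: Bool) {
        if !sessionStarted {
            // Ignore audio until the first video frame so the movie doesn't open on a void.
            guard isVideo else { return }
            writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
            sessionStarted = true
        }
        let input = isVideo ? videoInput : audioInput
        if input.isReadyForMoreMediaData { input.append(sampleBuffer) }
    }
}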

AVAssetWriter - Capturing video but no audio

I am making an app that records video. Up until now, I have been able to successfully record video and audio using AVCaptureMovieFileOutput, however, I now have a need to edit the video frames in real time to overlay some data onto the video. I began the switch to AVAssetWriter.
After the switch, I am able to record video (with my overlays) just fine using AVCaptureVideoDataOutput, however, AVCaptureAudioDataOutput never calls the delegate method so my audio doesn't record.
This is how I set up my AVCaptureSession:
fileprivate func setupCamera() {
//Set queues
queue = DispatchQueue(label: "myqueue", qos: .utility, attributes: .concurrent, autoreleaseFrequency: DispatchQueue.AutoreleaseFrequency.inherit, target: DispatchQueue.global())
//The size of output video will be 720x1280
print("Established AVCaptureSession")
cameraSession.sessionPreset = AVCaptureSession.Preset.hd1280x720
//Setup your camera
//Detect which type of camera should be used via `isUsingFrontFacingCamera`
let videoDevice: AVCaptureDevice
videoDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: AVMediaType.video, position: AVCaptureDevice.Position.front)!
print("Created AVCaptureDeviceInput: video")
//Setup your microphone
var audioDevice: AVCaptureDevice
//audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)!
audioDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInMicrophone, for: AVMediaType.audio, position: AVCaptureDevice.Position.unspecified)!
print("Created AVCaptureDeviceInput: audio")
do {
cameraSession.beginConfiguration()
cameraSession.automaticallyConfiguresApplicationAudioSession = false
cameraSession.usesApplicationAudioSession = true
// Add camera to your session
let videoInput = try AVCaptureDeviceInput(device: videoDevice)
if cameraSession.canAddInput(videoInput) {
cameraSession.addInput(videoInput)
print("Added AVCaptureDeviceInput: video")
} else
{
print("Could not add VIDEO!!!")
}
// Add microphone to your session
let audioInput = try AVCaptureDeviceInput(device: audioDevice)
if cameraSession.canAddInput(audioInput) {
cameraSession.addInput(audioInput)
print("Added AVCaptureDeviceInput: audio")
} else
{
print("Could not add MIC!!!")
}
//Define your video output
videoDataOutput.videoSettings = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
if cameraSession.canAddOutput(videoDataOutput) {
videoDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(videoDataOutput)
print("Added AVCaptureDataOutput: video")
}
//Define your audio output
if cameraSession.canAddOutput(audioDataOutput) {
audioDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(audioDataOutput)
print("Added AVCaptureDataOutput: audio")
}
//Set up the AVAssetWriter (to write to file)
do {
videoWriter = try AVAssetWriter(outputURL: getURL()!, fileType: AVFileType.mp4)
print("Setup AVAssetWriter")
//Video Settings
let videoSettings: [String : Any] = [
AVVideoCodecKey : AVVideoCodecType.h264,
AVVideoWidthKey : 720,
AVVideoHeightKey : 1280,
]
videoWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
videoWriterVideoInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Video")
if (videoWriter?.canAdd(videoWriterVideoInput!))!
{
videoWriter?.add(videoWriterVideoInput!)
print("Added AVAssetWriterInput: Video")
} else{
print("Could not add VideoWriterInput to VideoWriter")
}
// Add the audio input
//Audio Settings
let audioSettings : [String : Any] = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVSampleRateKey : 44100,
AVEncoderBitRateKey : 64000,
AVNumberOfChannelsKey: 1
]
videoWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
videoWriterAudioInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Audio")
if (videoWriter?.canAdd(videoWriterAudioInput!))!
{
videoWriter?.add(videoWriterAudioInput!)
print("Added AVAssetWriterInput: Audio")
} else{
print("Could not add AudioWriterInput to VideoWriter")
}
}
catch {
print("ERROR")
return
}
//PixelWriter
videoWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterVideoInput!, sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
kCVPixelBufferWidthKey as String: 1280,
kCVPixelBufferHeightKey as String: 768,
kCVPixelFormatOpenGLESCompatibility as String: true,
])
print("Created AVAssetWriterInputPixelBufferAdaptor")
//Present the preview of video
previewLayer = AVCaptureVideoPreviewLayer(session: cameraSession)
previewLayer.position = CGPoint.init(x: CGFloat(self.view.frame.width/2), y: CGFloat(self.view.frame.height/2))
previewLayer.bounds = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraView.layer.addSublayer(previewLayer)
print("Created AVCaptureVideoPreviewLayer")
//Don't forget start running your session
//this doesn't mean start record!
cameraSession.commitConfiguration()
cameraSession.startRunning()
}
catch let error {
debugPrint(error.localizedDescription)
}
}
Start recording:
func startRecording()
{
print("Begin Recording...")
let recordingClock = self.cameraSession.masterClock
isRecording = true
videoWriter?.startWriting()
videoWriter?.startSession(atSourceTime: CMClockGetTime(recordingClock!))
}
Stop recording:
func stopRecording()
{
if (videoWriter?.status.rawValue == 1) {
videoWriterVideoInput?.markAsFinished()
videoWriterAudioInput?.markAsFinished()
print("video finished")
print("audio finished")
}else{
print("not writing")
}
self.videoWriter?.finishWriting(){
self.isRecording = false
print("finished writing")
DispatchQueue.main.async{
if self.videoWriter?.status == AVAssetWriterStatus.failed {
print("status: failed")
}else if self.videoWriter?.status == AVAssetWriterStatus.completed{
print("status: completed")
}else if self.videoWriter?.status == AVAssetWriterStatus.cancelled{
print("status: cancelled")
}else{
print("status: unknown")
}
if let e=self.videoWriter?.error{
print("stop record error:", e)
}
}
}
print("Stop Recording!")
}
And this is the delegate method, which gets called for video, but not for audio:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
autoreleasepool {
guard captureOutput != nil,
sampleBuffer != nil,
connection != nil,
CMSampleBufferDataIsReady(sampleBuffer) else { return }
guard CMSampleBufferDataIsReady(sampleBuffer) else{
return
}
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
if !self.isRecording
{
return
}
var string = ""
if let audio = self.videoWriterAudioInput
{
if connection.audioChannels.count > 0
{
//EXECUTION NEVER REACHES HERE
if audio.isReadyForMoreMediaData
{
queue!.async() {
audio.append(sampleBuffer)
}
return
}
}
}
print ("\(string)")
if let camera = self.videoWriterVideoInput, camera.isReadyForMoreMediaData {
//This is getting called!!!
queue!.async() {
self.videoWriterInputPixelBufferAdaptor.append(self.imageToBuffer(from: image!)!, withPresentationTime: timestamp)
}
}
}//End autoreleasepool
}
}
I am sure the problem does not lie with my devices or inputs, as I was able to successfully record video and audio using AVCaptureMovieFileOutput. I have also read other relevant posts with no luck:
Corrupt video capturing audio and video using AVAssetWriter
AVAssetWriter audio with video together
Ripped my hair out for days on this. My mistake was simple: the delegate method was being called, but it was returning BEFORE it reached the audio statements. These were the culprits, which needed to be moved to after the audio-processing portion of my code:
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
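For reference, a sketch of the reordered callback (property names mirror the question's code); audio is handled first, so the video-only orientation branch can no longer return before the audio is appended:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard CMSampleBufferDataIsReady(sampleBuffer), isRecording else { return }

    // Handle audio first: audio connections have no video orientation,
    // so the old early `return` used to swallow every audio buffer here.
    if output == audioDataOutput {
        if let audio = videoWriterAudioInput, audio.isReadyForMoreMediaData {
            audio.append(sampleBuffer)
        }
        return
    }

    // Video-only handling from here on.
    if connection.isVideoOrientationSupported {
        connection.videoOrientation = currentVideoOrientation()
    }
    if let video = videoWriterVideoInput, video.isReadyForMoreMediaData {
        // The question renders an overlay into a pixel buffer and appends it through
        // videoWriterInputPixelBufferAdaptor; appending the raw buffer here just keeps the sketch short.
        video.append(sampleBuffer)
    }
}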

Video is not getting saved + Appending pixel buffer to adapter returns false , both is very rare and random

I am writing a video to the photo library / documents directory using a capture session and AVAssetWriter. What I want to know is why, when I append a pixel buffer to the adapter, I sometimes get false here: print("video is \(bobo)"), and the same happens with audio.
When that happens the output file isn't saved, and I get an error on export and saving.
I have been working on this for a long time; any suggestions, or pointing out my mistake, would help me a lot.
The main problem is that the issue is very random, say 1 in 10 times, but it does persist and I want to eliminate it.
My code where I am appending the pixel buffer to the adapter:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)
{
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if captureOutput == videoOutput
{
if self.record == true{
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
if self.record == true
{
if self.writerInput.isReadyForMoreMediaData
{
DispatchQueue(label: "newQeueLocalFeedVideo2", attributes: DispatchQueue.Attributes.concurrent).sync(execute: {
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
let bobo = self.adapter.append(pixelBuffer!, withPresentationTime: self.starTime)
print("video is \(bobo)")
})
}
}
}
}else if captureOutput == audioOutput{
if self.record == true
{
if audioWriterInput.isReadyForMoreMediaData
{
let bo = audioWriterInput.append(sampleBuffer)
print("audio conversion is \(bo)")
}
}
}
}
/*****------******/
Code where I am setting asset writer
{
let fileUrl = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("\(getCurrentDate())-capturedvideo.mp4")
lastPath = fileUrl.path
videoWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileTypeMPEG4)
lastPathURL = fileUrl
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width) as Float), AVVideoHeightKey : NSNumber(value: Float(outputSize.height) as Float)] as [String : Any]
writerInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
writerInput.expectsMediaDataInRealTime = true
// writerInput.performsMultiPassEncodingIfSupported = true
audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: DejalActivityView.getAudioDictionary() as? [String:AnyObject])
videoWriter.add(writerInput)
videoWriter.add(audioWriterInput)
adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: DejalActivityView.getAdapterDictionary() as? [String:AnyObject])
videoWriter.startWriting()
videoWriter.startSession(atSourceTime: starTime)
//self.client?.recordCaptureSession.captureSession.startRunning()
record = true
}
And to export the file to a video I am using this code.
self.videoWriter.finishWriting { () -> Void in
Thread.sleep(forTimeInterval: 1.0)
if self.videoWriter.status == AVAssetWriterStatus.failed {
print("oh noes, an error: \(self.videoWriter.error.debugDescription)")
completionHandler(true)
} else {
let content = FileManager.default.contents(atPath: self.lastPathURL.path)
print("wrote video: \(self.lastPathURL.path) at size: \(content?.count)")
// This below line will save the video to photo library
HEPhotoLibraryHelper.saveVideosToPhotoLibrary(self.lastPathURL, withCompletionBlock: { (result) in
if result == true
{
do
{
try HEDocDirectory.shared.fileManagerDefault .removeItem(atPath: self.lastPath)
}catch let err as NSError
{
print("Error in removing file from doc dir \(err.localizedDescription)")
}
}
})
}
}
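Since append(_:withPresentationTime:) returning false typically means the writer has already left the .writing state, a first debugging step (a generic sketch reusing the question's videoWriter/writerInput/adapter names) is to log the writer's status and error at the moment an append fails:
// Sketch: capture the writer's state when an append fails, so the random
// 1-in-10 failure leaves something to diagnose.
func appendVideo(pixelBuffer: CVPixelBuffer, at time: CMTime) {
    guard writerInput.isReadyForMoreMediaData else { return }
    if !adapter.append(pixelBuffer, withPresentationTime: time) {
        print("video append failed at \(time.seconds); writer status: \(videoWriter.status.rawValue), error: \(String(describing: videoWriter.error))")
    }
}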
