AVCaptureMovieFileOutput records audio only for the first video - iOS

I'm developing an application in Swift that records video for a fixed duration, 7 seconds in this example.
Everything works for the first recording, but subsequent recordings contain only video and no audio.
I read something about changing the movieFragmentInterval of the AVCaptureMovieFileOutput instance; I tried that, but nothing changed.
func setup(){
    self.session = AVCaptureSession()
    self.session.sessionPreset = AVCaptureSessionPresetHigh

    // INPUT
    var devices : Array<AVCaptureDevice> = AVCaptureDevice.devices() as Array<AVCaptureDevice>
    var camera: AVCaptureDevice!
    var microphone: AVCaptureDevice!
    for device in devices {
        println("Device name: \(device)")
        if device.hasTorch && device.isTorchModeSupported(AVCaptureTorchMode.On) {
            self.torch = device
        }
        if device.hasMediaType(AVMediaTypeAudio) {
            microphone = device
        }
        if device.hasMediaType(AVMediaTypeVideo) {
            if device.position == .Back {
                println("Device position: back.")
                camera = device
            }
        }
    }

    var error: NSErrorPointer = nil

    // audio input
    if microphone != nil {
        var audioInput = AVCaptureDeviceInput(device: microphone, error: error)
        if error == nil {
            if self.session.canAddInput(audioInput) {
                self.session.addInput(audioInput)
            }
        }
    }

    // video input
    if camera != nil {
        var videoInput = AVCaptureDeviceInput(device: camera, error: error)
        if error == nil {
            if self.session.canAddInput(videoInput) {
                self.session.addInput(videoInput)
            }
        }
    }

    self.output = AVCaptureMovieFileOutput()
    var preferredTimeScale: Int32 = 30
    var totalSeconds: Int64 = Int64(Int(7) * Int(preferredTimeScale)) // recording stops automatically after 7 seconds
    var maxDuration: CMTime = CMTimeMake(totalSeconds, preferredTimeScale)
    self.output.maxRecordedDuration = maxDuration
    self.output.minFreeDiskSpaceLimit = 1024 * 1024
    if session.canAddOutput(self.output) {
        session.addOutput(self.output)
    }

    self.connection = self.output.connectionWithMediaType(AVMediaTypeVideo)
    if self.connection.supportsVideoStabilization {
        println("video stabilization available")
        self.connection.enablesVideoStabilizationWhenAvailable = true
    }
    self.connection.videoOrientation = .LandscapeRight

    self.session.startRunning()
}

func startRecording(){
    self.output.startRecordingToOutputFileURL(self.tempPathURL, recordingDelegate: self)
}

func stopRecording(){
    self.output.stopRecording()
}
func captureOutput(captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAtURL outputFileURL: NSURL!, fromConnections connections: [AnyObject]!, error: NSError!){
    println("Finish recording")
    // A stop caused by maxRecordedDuration is reported as an error, so check the
    // AVErrorRecordingSuccessfullyFinishedKey flag before treating it as a failure.
    var success: Bool = true
    if error != nil {
        println("error: \(error)")
        success = (error.userInfo?[AVErrorRecordingSuccessfullyFinishedKey] as? Bool) ?? false
    }
    if success == true {
        stopRecording()
    }
}
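For reference, the movieFragmentInterval change mentioned above would look roughly like this. This is only a hedged sketch of that idea, reusing the same output property as the setup code; setting the interval to kCMTimeInvalid disables fragment writing, which is the usual reason it gets suggested for audio-drop issues:
self.output = AVCaptureMovieFileOutput()
// Hedged sketch: disable movie fragments before the output is used.
self.output.movieFragmentInterval = kCMTimeInvalid
self.output.maxRecordedDuration = maxDuration
self.output.minFreeDiskSpaceLimit = 1024 * 1024
if session.canAddOutput(self.output) {
    session.addOutput(self.output)
}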

Related

AVCapturePhoto SemanticSegmentationMatte nil without audio input?

When I add an audio input to the capture session, the photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) callback returns semantic segmentation mattes properly. Without the audio input, the returned mattes are nil. Is it possible to avoid adding an audio input (and asking the user for microphone permission) and still get the mattes?
// MARK: - Session
private func setupSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = .photo
    setupInputOutput()
    setupPreviewLayer(view)
    captureSession?.startRunning()
}

// MARK: - Settings
private func setupCamera() {
    settings = AVCapturePhotoSettings()
    let supportsHEVC = AVAssetExportSession.allExportPresets().contains(AVAssetExportPresetHEVCHighestQuality)
    settings = supportsHEVC
        ? AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        : AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    settings!.flashMode = .auto
    settings!.isHighResolutionPhotoEnabled = true
    settings!.previewPhotoFormat = [kCVPixelBufferPixelFormatTypeKey as String: settings!.__availablePreviewPhotoPixelFormatTypes.first ?? NSNumber()]
    settings!.isDepthDataDeliveryEnabled = true
    settings!.isPortraitEffectsMatteDeliveryEnabled = true
    if self.photoOutput?.enabledSemanticSegmentationMatteTypes.isEmpty == false {
        settings!.enabledSemanticSegmentationMatteTypes = self.photoOutput?.enabledSemanticSegmentationMatteTypes ?? [AVSemanticSegmentationMatte.MatteType]()
    }
    settings!.photoQualityPrioritization = self.photoQualityPrioritizationMode
}

private func setupInputOutput() {
    photoOutput = AVCapturePhotoOutput()
    guard let captureSession = captureSession else { return }
    guard let photoOutput = photoOutput else { return }
    do {
        captureSession.beginConfiguration()
        captureSession.sessionPreset = .photo
        let devices = self.videoDeviceDiscoverySession.devices
        currentDevice = devices.first(where: { $0.position == .front && $0.deviceType == .builtInTrueDepthCamera })
        guard let videoDevice = currentDevice else {
            captureSession.commitConfiguration()
            return
        }
        videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
        if captureSession.canAddInput(videoDeviceInput) {
            captureSession.addInput(videoDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
        currentDevice = AVCaptureDevice.default(for: .audio)
        captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
        if captureSession.canAddInput(captureDeviceInput) {
            captureSession.addInput(captureDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
    } catch {
        errorMessage = error.localizedDescription
        print(error.localizedDescription)
        captureSession.commitConfiguration()
        return
    }
    if captureSession.canAddOutput(photoOutput) {
        captureSession.addOutput(photoOutput)
        photoOutput.isHighResolutionCaptureEnabled = true
        photoOutput.isLivePhotoCaptureEnabled = photoOutput.isLivePhotoCaptureSupported
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
        photoOutput.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes
        photoOutput.maxPhotoQualityPrioritization = .balanced
    }
    captureSession.commitConfiguration()
}

private func setupPreviewLayer(_ view: UIView) {
    self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession ?? AVCaptureSession())
    self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    self.cameraPreviewLayer?.frame = view.frame
    view.layer.insertSublayer(self.cameraPreviewLayer ?? AVCaptureVideoPreviewLayer(), at: 0)
}
I was not able to get semantic segmentation mattes (SSM) at all, with or without an audio input. I am currently developing on an iPhone X. After struggling for some time, I asked Apple about it in a 1-on-1 lab session during WWDC 2021. I was told that on my device the API would only expose the portrait effects matte. iPhone 11 and above can get skin, teeth, and hair; the new glasses matte that they snuck in recently without announcing requires iPhone 12.
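For anyone hitting the same wall, here is a minimal sketch (not part of the answer above) of how you might check what the output actually reports and read the mattes back in the delegate. It assumes the question's photoOutput property and the standard AVCapturePhotoCaptureDelegate callback:
func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    guard error == nil else { return }
    // Only matte types enabled on both the output and the photo settings come back non-nil.
    for matteType in output.enabledSemanticSegmentationMatteTypes {
        if let matte = photo.semanticSegmentationMatte(for: matteType) {
            let width = CVPixelBufferGetWidth(matte.mattingImage)
            let height = CVPixelBufferGetHeight(matte.mattingImage)
            print("Got \(matteType.rawValue) matte: \(width)x\(height)")
        } else {
            print("\(matteType.rawValue) matte is nil on this device/configuration")
        }
    }
}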

'No active and enabled video connection' error when capturing photo with TrueDepth cam

I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following the guide from Apple about photo capturing and capturing depth):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }

    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                    for: .video, position: .unspecified)
        else { fatalError("No dual camera.") }

    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }

    self.captureSession!.beginConfiguration()

    // add video input
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
        self.captureSession!.canAddInput(videoDeviceInput)
        else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)

    // add video output
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }

    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
        else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()

    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled = self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
I solved it with the following implementation. Any comments / remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                   for: .video, position: .unspecified)
                else { fatalError("No dual camera.") }
            videoDevice = vd

            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })
            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }
        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()

            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                captureSession!.canAddInput(videoDeviceInput)
                else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)

            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()

            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
                else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)

            // must be set after photoOutput is added to captureSession. Why???
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported

            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }
    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait
        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
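For completeness, a hedged sketch of how this CameraController might be driven from a view controller; the class and property names below are illustrative and not part of the answer:
import UIKit

class CameraViewController: UIViewController {
    let cameraController = CameraController()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { [weak self] error in
            guard let self = self else { return }
            if let error = error {
                print("Camera setup failed: \(error)")
                return
            }
            // displayPreview requires a running session, so call it only after prepare succeeds.
            try? self.cameraController.displayPreview(on: self.view)
        }
    }
}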

Setting AVCaptureDevice.flashMode for the front camera throws "error while capturing still image" when taking a still image

When I set the flashMode for my front camera and then call
let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
I get the following error message:
error while capturing still image: Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSUnderlyingError=0x12eeb7200 {Error Domain=NSOSStatusErrorDomain Code=-16800 "(null)"}, NSLocalizedFailureReason=An unknown error occurred (-16800), NSLocalizedDescription=The operation could not be completed}
If I don't set the camera's flashMode and then call:
let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
The front camera takes a picture and doesn't throw the error.
Currently, this is how I set up my camera:
func getCameraStreamLayer() -> CALayer? {
    captureSession = AVCaptureSession()
    captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
    currentCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    stillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput!.outputSettings = [ AVVideoCodecKey: AVVideoCodecJPEG ]

    if let input = try? AVCaptureDeviceInput(device: currentCamera) as AVCaptureDeviceInput {
        if captureSession!.canAddInput(input) && captureSession!.canAddOutput(stillImageOutput) {
            captureSession!.addInput(input)
            captureSession!.addOutput(stillImageOutput)
        }
    }
    return AVCaptureVideoPreviewLayer(session: captureSession)
}

func toggleFlash() {
    flash = !flash
    if flash {
        for case let (device as AVCaptureDevice) in AVCaptureDevice.devices() {
            if device.hasFlash && device.flashAvailable {
                if device.isFlashModeSupported(.On) {
                    do {
                        try device.lockForConfiguration()
                        device.flashMode = .On
                        device.unlockForConfiguration()
                    } catch {
                        print("Something went wrong")
                    }
                }
            }
        }
    } else {
        // turn off flash
    }
}

func photograph(process: (CMSampleBuffer!, NSError!) -> ()) {
    let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
    stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
}

func flipCamera() {
    guard let session = captureSession where session.running == true else {
        return
    }
    session.beginConfiguration()
    let currentCameraInput = session.inputs[0] as! AVCaptureDeviceInput
    session.removeInput(currentCameraInput)

    let newCamera = {
        let devices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for case let device as AVCaptureDevice in devices {
            if device.position == .Front && currentCameraInput.device.position == .Back {
                return device
            }
            if device.position == .Back && currentCameraInput.device.position == .Front {
                return device
            }
        }
        return nil
    }() as AVCaptureDevice?
    currentCamera = newCamera!

    if let newVideoInput = try? AVCaptureDeviceInput(device: newCamera) {
        captureSession?.addInput(newVideoInput)
    }
    captureSession?.commitConfiguration()
}
I'm not sure what I should do. I've tried creating a new capture session, locking the device, and then setting the flashMode, but I still get the same error.
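One thing worth ruling out (a hedged observation based on the code above, not a confirmed fix): toggleFlash configures any device that reports a flash, which is typically the back camera, while the session may be fed by the front camera after flipCamera. A sketch that scopes the flash change to the device actually backing the current input would look like this; flash and captureSession are the question's own properties:
func setFlashOnActiveDevice(on: Bool) {
    guard let session = captureSession else { return }
    guard let input = session.inputs.first as? AVCaptureDeviceInput else { return }
    let device = input.device
    let mode: AVCaptureFlashMode = on ? .On : .Off
    // Only touch the device that is actually feeding the session.
    if device.hasFlash && device.isFlashModeSupported(mode) {
        do {
            try device.lockForConfiguration()
            device.flashMode = mode
            device.unlockForConfiguration()
        } catch {
            print("Could not lock device for configuration: \(error)")
        }
    }
}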

AVFoundation take picture every second SWIFT

I'm trying to use the AVFoundation framework to take a picture and analyze it in my app. I want it to take a picture every second automatically. How do I do that?
Here is my current code; right now it only takes a picture when capturePhoto() is called.
func setupSession() {
    session = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPresetPhoto

    let camera = AVCaptureDevice
        .defaultDeviceWithMediaType(AVMediaTypeVideo)
    do { input = try AVCaptureDeviceInput(device: camera) } catch { return }

    output = AVCaptureStillImageOutput()
    output.outputSettings = [ AVVideoCodecKey: AVVideoCodecJPEG ]

    guard session.canAddInput(input)
        && session.canAddOutput(output) else { return }
    session.addInput(input)
    session.addOutput(output)

    previewLayer = AVCaptureVideoPreviewLayer(session: session)
    previewLayer!.videoGravity = AVLayerVideoGravityResizeAspect
    previewLayer!.connection?.videoOrientation = .Portrait
    view.layer.addSublayer(previewLayer!)

    session.startRunning()
}

func capturePhoto() {
    guard let connection = output.connectionWithMediaType(AVMediaTypeVideo) else { return }
    connection.videoOrientation = .Portrait

    output.captureStillImageAsynchronouslyFromConnection(connection) { (sampleBuffer, error) in
        guard sampleBuffer != nil && error == nil else { return }
        let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
        guard let image = UIImage(data: imageData) else { return }
        // do stuff with image
    }
}
What should I change?
Create an NSTimer that fires once a second, and in that timer's method call capturePhoto():
var cameraTimer = NSTimer.scheduledTimerWithTimeInterval(1.0,
                                                         target: self,
                                                         selector: #selector(timerCalled(_:)),
                                                         userInfo: nil,
                                                         repeats: true)
Your timerCalled function might look like this:
func timerCalled(timer: NSTimer) {
    capturePhoto()
}
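One small follow-up: if cameraTimer is stored as a property, invalidate it when you no longer need captures (for example in viewWillDisappear), otherwise it keeps firing and retaining self:
override func viewWillDisappear(animated: Bool) {
    super.viewWillDisappear(animated)
    // Stop the once-a-second captures when the screen goes away.
    cameraTimer.invalidate()
}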

AVCaptureFileOutputRecordingDelegate not writing to file, Swift 2

My view controller contains a preview layer, which shows the live image from my camera. When a button is pressed and held, my code is supposed to record a video and write it to a temporary file locally. This worked well with Swift 1.2 and Xcode 6, but stopped working after I converted the code to Swift 2 when updating to Xcode 7.
When I let go of the button, captureOutput doesn't get called, and no file is written to the given path.
Some relevant code follows.
I would appreciate any help!
import UIKit
import MobileCoreServices
import AVFoundation
import AVKit

class ViewControllerPhoto: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIPickerViewDelegate, UIGestureRecognizerDelegate, ACEDrawingViewDelegate, UITextViewDelegate, AVCaptureFileOutputRecordingDelegate, UITableViewDelegate, UITableViewDataSource {

    @IBOutlet weak var captureButton: UIButton!

    var videoCheck: Bool = false
    let captureSession = AVCaptureSession()
    var previewLayer: AVCaptureVideoPreviewLayer?
    var captureDevice: AVCaptureDevice?
    var movieFileOutput = AVCaptureMovieFileOutput()
    var imageData: NSData!
    var outputPath: NSString!
    var outputURL: NSURL!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        if captureSession.canSetSessionPreset(AVCaptureSessionPresetMedium) {
            captureSession.sessionPreset = AVCaptureSessionPresetMedium
        }
        let devices = AVCaptureDevice.devices()
        // Loop through all the capture devices on this phone
        for device in devices {
            // Make sure this particular device supports video
            if device.hasMediaType(AVMediaTypeVideo) {
                // Finally check the position and confirm we've got the back camera
                if device.position == AVCaptureDevicePosition.Back {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        print("Capture device found")
                        beginSession()
                    }
                }
            }
        }
        self.videoCheck = false
    }
    func beginSession() {
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        configureDevice()

        var err: NSError? = nil
        // captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        do {
            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
        } catch {
            print("error: \(err?.localizedDescription)")
        }

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.view.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.view.layer.frame
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill

        if captureSession.canAddOutput(movieFileOutput) {
            self.captureSession.addOutput(movieFileOutput)
        }

        // SET CONNECTION PROPERTIES
        var captureConnection: AVCaptureConnection = movieFileOutput.connectionWithMediaType(AVMediaTypeVideo)
        if captureConnection.supportsVideoOrientation {
            captureConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        }

        var audioDevice: AVCaptureDevice = AVCaptureDevice.devicesWithMediaType(AVMediaTypeAudio)[0] as! AVCaptureDevice
        do {
            let audioDeviceInput: AVCaptureDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
            if captureSession.canAddInput(audioDeviceInput) {
                captureSession.addInput(audioDeviceInput)
            }
        } catch {
            print("error")
        }

        captureSession.startRunning()
    }
    func captureVideo() {
        outputPath = (NSURL(fileURLWithPath: NSTemporaryDirectory())).URLByAppendingPathComponent("movie.mov").absoluteString as NSString
        outputURL = NSURL(fileURLWithPath: outputPath as String)

        let fileManager: NSFileManager = NSFileManager.defaultManager()
        if outputURL.path != nil {
            if fileManager.fileExistsAtPath(outputURL.path!) {
                do {
                    try fileManager.removeItemAtPath(outputPath as String)
                } catch {
                    print(error)
                }
            }
        }
        self.movieFileOutput.startRecordingToOutputFileURL(outputURL, recordingDelegate: self)
    }

    func cameraWithPosition(position: AVCaptureDevicePosition) -> AVCaptureDevice {
        let devices: NSArray = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for device in devices {
            if device.position == position {
                return device as! AVCaptureDevice
            }
        }
        return AVCaptureDevice()
    }
    @IBAction func captureButtonIsLongPressed(sender: UILongPressGestureRecognizer) {
        if sender.state == UIGestureRecognizerState.Began {
            videoCheck = true
            captureVideo()
        } else if sender.state == UIGestureRecognizerState.Ended {
            self.movieFileOutput.stopRecording()
        }
    }

    func captureOutput(captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAtURL outputFileURL: NSURL!, fromConnections connections: [AnyObject]!, error: NSError!) {
        print("Output")
        playVideo()
    }

    func playVideo() {
        let path = outputPath
        let url = outputURL
        let player = AVPlayer(URL: url)
        let playerLayer = AVPlayerLayer(player: player)
        playerLayer.frame = self.view.bounds
        player.play()
    }
}
Figured it out after a lot of hassle. My mistake was in the line
outputPath = (NSURL(fileURLWithPath: NSTemporaryDirectory())).URLByAppendingPathComponent("movie.mov").absoluteString as NSString
Of course absoluteString gives a URL string (with a file:// prefix), not a file system path. Changing to these lines fixed my problem:
outputURL = NSURL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true).URLByAppendingPathComponent("movie.mov")
outputPath = outputURL.path
Hope this helps anyone!
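If it helps anyone else, here is a hedged sketch that pulls the fix together into one helper: build the NSURL first, derive the path from it, and clear any stale file before recording. It uses the same Swift 2 APIs and the movieFileOutput property from the question:
func startTemporaryRecording() {
    let url = NSURL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
        .URLByAppendingPathComponent("movie.mov")
    // Remove any leftover file from a previous run before recording again.
    if let path = url.path where NSFileManager.defaultManager().fileExistsAtPath(path) {
        _ = try? NSFileManager.defaultManager().removeItemAtPath(path)
    }
    movieFileOutput.startRecordingToOutputFileURL(url, recordingDelegate: self)
}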
