torchMode blinks at the start and immediately goes off - ios

I don't understand why the torch is not continuously on.
It blinks at the start and then immediately goes off.
I did call lockForConfiguration and unlockForConfiguration as well.
I have added the part of my code that I think is relevant to the problem.
override func viewDidLoad() {
    super.viewDidLoad()
    initCamera()
}

func initCamera() {
    captureSession.sessionPreset = .medium
    guard let device = AVCaptureDevice.default(for: .video) else {
        print("no camera")
        return
    }
    try? device.lockForConfiguration()
    device.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: 30)
    if device.hasTorch && device.isTorchAvailable && !device.isTorchActive {
        do {
            try device.setTorchModeOn(level: 1.0)
            device.torchMode = .on
        } catch {
            print("Torch could not be used")
        }
    } else {
        print("Torch is not available")
    }
    if device.isFocusModeSupported(.continuousAutoFocus) {
        device.focusMode = .continuousAutoFocus
    }
    device.unlockForConfiguration()
    guard let captureInput = try? AVCaptureDeviceInput(device: device) else { return }
    if captureSession.canAddInput(captureInput) {
        captureSession.addInput(captureInput)
    }
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    let videoOutput = AVCaptureVideoDataOutput()
    let queue = DispatchQueue(label: "queue image delegate", attributes: .concurrent)
    videoOutput.setSampleBufferDelegate(self, queue: queue)
    if captureSession.canAddOutput(videoOutput) {
        captureSession.addOutput(videoOutput)
    }
    captureSession.startRunning()
}
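The thread does not include an answer, but one common cause (my assumption, not stated in the post) is that adding inputs/outputs and starting the session can reset the torch, so a torch enabled during configuration gets switched off again. A minimal sketch of the usual workaround, turning the torch on only after startRunning(), using the same captureSession as above:

// Sketch (assumption, not from the original post): enable the torch after the
// session is already running, so session setup cannot reset it.
captureSession.startRunning()

if let device = AVCaptureDevice.default(for: .video),
   device.hasTorch, device.isTorchAvailable {
    do {
        try device.lockForConfiguration()
        try device.setTorchModeOn(level: 1.0)
        device.unlockForConfiguration()
    } catch {
        print("Torch could not be used: \(error)")
    }
}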

Related

How to add timestamp watermark while recording video?

I'm making an iOS video recording app using AVFoundation. Every frame of the video should carry a timestamp showing when that frame was recorded. I searched the internet but could not find any reasonable articles.
Here is my current code for video recording.
private var session = AVCaptureSession()
private let videoOutput = AVCaptureVideoDataOutput()
private let audioOutput = AVCaptureAudioDataOutput()

private func setupCaptureSession() {
    session.sessionPreset = .vga640x480
    guard
        let videoDevice = AVCaptureDevice.default(for: .video),
        let audioDevice = AVCaptureDevice.default(for: .audio),
        let videoInput = try? AVCaptureDeviceInput(device: videoDevice),
        let audioInput = try? AVCaptureDeviceInput(device: audioDevice) else {
            fatalError()
    }
    session.beginConfiguration()
    session.addInput(videoInput)
    session.addInput(audioInput)
    session.addOutput(videoOutput)
    session.addOutput(audioOutput)
    session.commitConfiguration()
    DispatchQueue.main.async {
        let previewView = PreviewView()
        previewView.videoPreviewLayer.session = self.session
        previewView.videoPreviewLayer.connection?.videoOrientation = .landscapeRight
        previewView.frame = self.view.bounds
        previewView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
        self.compositeImageView.insertSubview(previewView, at: 0)
        self.session.startRunning()
    }
}

private func startRecording() {
    // AVAssetWriter
    assetWriter = try! AVAssetWriter(outputURL: self.exportURL!, fileType: .mov)
    // video
    let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
        AVVideoCodecKey : AVVideoCodecType.h264,
        AVVideoWidthKey : 640,
        AVVideoHeightKey : 480
    ])
    videoInput.expectsMediaDataInRealTime = true
    assetWriter?.add(videoInput)
    // audio
    let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: nil)
    audioInput.expectsMediaDataInRealTime = true
    assetWriter?.add(audioInput)
    assetWriter?.startWriting()
    let queue = DispatchQueue.global()
    videoOutput.setSampleBufferDelegate(self, queue: queue)
    audioOutput.setSampleBufferDelegate(self, queue: queue)
}

private func finishRecording() {
    videoOutput.setSampleBufferDelegate(nil, queue: nil)
    audioOutput.setSampleBufferDelegate(nil, queue: nil)
    startTime = nil
    assetWriter?.finishWriting { [weak self] in
        guard let self = self else { return }
        guard self.assetWriter!.status == .completed else { fatalError("failed recording") }
        self.saveToPhotoLibrary { isSaveToPhotoLibrarySucceed in
            print("video saved to photo library")
            guard isSaveToPhotoLibrarySucceed else {
                print("Save to photo library failed")
                return
            }
            self.saveToRealmFromTempVideo {
                self.uploadVideoToServer()
            }
        }
    }
}

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard isRecording else { fatalError() }
    guard CMSampleBufferDataIsReady(sampleBuffer) else {
        print("not ready")
        return
    }
    if startTime == nil {
        startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        assetWriter?.startSession(atSourceTime: startTime!)
    }
    // Append video or audio
    let mediaType: AVMediaType = output is AVCaptureVideoDataOutput ? .video : .audio
    if mediaType == .video {
        appendVideo(from: sampleBuffer)
    } else if mediaType == .audio {
        appendAudio(from: sampleBuffer)
    } else {
        fatalError("should not reach here")
    }
}
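The question is left unanswered in this thread, but one approach (my assumption, not from the original post) is to burn the timestamp into each video pixel buffer in place before it is appended to the writer input. A minimal sketch, assuming videoOutput.videoSettings is configured for kCVPixelFormatType_32BGRA and a hypothetical drawTimestamp(on:text:) helper is called from captureOutput before appendVideo(from:):

import AVFoundation
import UIKit

// Hypothetical helper (assumption, not from the original post): draws a timestamp
// string directly into a 32BGRA pixel buffer before it reaches AVAssetWriter.
func drawTimestamp(on pixelBuffer: CVPixelBuffer, text: String) {
    CVPixelBufferLockBaseAddress(pixelBuffer, [])
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, []) }

    let height = CVPixelBufferGetHeight(pixelBuffer)
    guard let context = CGContext(
        data: CVPixelBufferGetBaseAddress(pixelBuffer),
        width: CVPixelBufferGetWidth(pixelBuffer),
        height: height,
        bitsPerComponent: 8,
        bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer),
        space: CGColorSpaceCreateDeviceRGB(),
        bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
    ) else { return }

    // Flip into UIKit's top-left coordinate space so the text is not mirrored.
    UIGraphicsPushContext(context)
    context.translateBy(x: 0, y: CGFloat(height))
    context.scaleBy(x: 1, y: -1)
    (text as NSString).draw(
        at: CGPoint(x: 16, y: 16),
        withAttributes: [
            .font: UIFont.monospacedDigitSystemFont(ofSize: 24, weight: .bold),
            .foregroundColor: UIColor.white
        ]
    )
    UIGraphicsPopContext()
}

// In captureOutput, before appendVideo(from: sampleBuffer):
// if mediaType == .video, let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
//     drawTimestamp(on: pixelBuffer, text: DateFormatter.localizedString(
//         from: Date(), dateStyle: .none, timeStyle: .medium))
// }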

AVCapturePhoto SemanticSegmentationMatte nil without audio input?

When I add an audio input to the capture session, the photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) callback returns semantic segmentation mattes properly. Without the audio input, the returned mattes are nil. Is it possible to get the mattes without adding an audio input and asking the user for microphone permission?
// MARK: - Session
private func setupSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = .photo
    setupInputOutput()
    setupPreviewLayer(view)
    captureSession?.startRunning()
}

// MARK: - Settings
private func setupCamera() {
    settings = AVCapturePhotoSettings()
    let supportsHEVC = AVAssetExportSession.allExportPresets().contains(AVAssetExportPresetHEVCHighestQuality)
    settings = supportsHEVC ? AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc]) : AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    settings!.flashMode = .auto
    settings!.isHighResolutionPhotoEnabled = true
    settings!.previewPhotoFormat = [kCVPixelBufferPixelFormatTypeKey as String: settings!.__availablePreviewPhotoPixelFormatTypes.first ?? NSNumber()]
    settings!.isDepthDataDeliveryEnabled = true
    settings!.isPortraitEffectsMatteDeliveryEnabled = true
    if self.photoOutput?.enabledSemanticSegmentationMatteTypes.isEmpty == false {
        settings!.enabledSemanticSegmentationMatteTypes = self.photoOutput?.enabledSemanticSegmentationMatteTypes ?? [AVSemanticSegmentationMatte.MatteType]()
    }
    settings!.photoQualityPrioritization = self.photoQualityPrioritizationMode
}

private func setupInputOutput() {
    photoOutput = AVCapturePhotoOutput()
    guard let captureSession = captureSession else { return }
    guard let photoOutput = photoOutput else { return }
    do {
        captureSession.beginConfiguration()
        captureSession.sessionPreset = .photo
        let devices = self.videoDeviceDiscoverySession.devices
        currentDevice = devices.first(where: { $0.position == .front && $0.deviceType == .builtInTrueDepthCamera })
        guard let videoDevice = currentDevice else {
            captureSession.commitConfiguration()
            return
        }
        videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
        if captureSession.canAddInput(videoDeviceInput) {
            captureSession.addInput(videoDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
        currentDevice = AVCaptureDevice.default(for: .audio)
        captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
        if captureSession.canAddInput(captureDeviceInput) {
            captureSession.addInput(captureDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
    } catch {
        errorMessage = error.localizedDescription
        print(error.localizedDescription)
        captureSession.commitConfiguration()
        return
    }
    if captureSession.canAddOutput(photoOutput) {
        captureSession.addOutput(photoOutput)
        photoOutput.isHighResolutionCaptureEnabled = true
        photoOutput.isLivePhotoCaptureEnabled = photoOutput.isLivePhotoCaptureSupported
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
        photoOutput.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes
        photoOutput.maxPhotoQualityPrioritization = .balanced
    }
    captureSession.commitConfiguration()
}

private func setupPreviewLayer(_ view: UIView) {
    self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession ?? AVCaptureSession())
    self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    self.cameraPreviewLayer?.frame = view.frame
    view.layer.insertSublayer(self.cameraPreviewLayer ?? AVCaptureVideoPreviewLayer(), at: 0)
}
I was not able to get semantic segmentation mattes (SSM) at all, with or without an audio input. I am currently developing on an iPhone X. After struggling for some time, I asked Apple about it in a one-on-one lab session during WWDC 2021. I was told that on my device the API only exposes the portrait effects matte; iPhone 11 and above can get the skin, teeth and hair mattes, and the glasses matte that was added recently without much announcement requires an iPhone 12.
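For completeness, a minimal sketch of reading the mattes back in the delegate callback, assuming the photo output and settings were configured as above (the .skin matte type is just an example):

// Sketch: returns nil when the device or configuration cannot deliver that matte type.
func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    if let error = error {
        print("Capture failed: \(error)")
        return
    }
    if let skinMatte = photo.semanticSegmentationMatte(for: .skin) {
        print("Got skin matte, \(CVPixelBufferGetWidth(skinMatte.mattingImage)) px wide")
    } else {
        print("No skin matte in this capture")
    }
}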

AVFoundation Switching Cameras Slow (Connected To Sample Buffer)

override func viewDidLoad() {
    super.viewDidLoad()
    ...
    setupDevice()
    setupInputOutput()
}
viewDidLoad starts the setup chain.
func setupDevice() {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
    let devices = deviceDiscoverySession.devices
    for device in devices {
        if device.position == AVCaptureDevice.Position.back {
            backCamera = device
            setupCorrectFramerate(currentCamera: backCamera!)
        } else if device.position == AVCaptureDevice.Position.front {
            frontCamera = device
            setupCorrectFramerate(currentCamera: frontCamera!)
        }
    }
    currentCamera = cameraModeIsBack ? backCamera : frontCamera
}

var deviceInput: AVCaptureDeviceInput?
let videoOutput = AVCaptureVideoDataOutput()

func setupInputOutput() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
        deviceInput = captureDeviceInput
        captureSession.sessionPreset = AVCaptureSession.Preset.hd1280x720
        if captureSession.canAddInput(deviceInput!) {
            captureSession.addInput(deviceInput!)
        }
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer delegate", attributes: []))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
        captureSession.startRunning()
        if firstTime {
            setupMetal()
        }
        firstTime = false
        toggleCamerButton.isEnabled = true
    } catch {
        print(error)
    }
}
At a high level, I find the devices, set up their framerate, and set up the capture session (the buffer output gets processed into an MTKView).
The issue is when I change cameras. It's either snappy and instant, or it becomes very slow and freezes for 4-5 seconds before responding.
The logic I use to change the cameras:
func updateCameras() {
    DispatchQueue.global().async {
        self.captureSession.stopRunning()
        self.captureSession.removeInput(self.deviceInput!)
        self.currentCamera = self.cameraModeIsBack ? self.backCamera : self.frontCamera
        self.changeCameras()
    }
}

func changeCameras() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: self.currentCamera!)
        self.deviceInput = captureDeviceInput
        if self.captureSession.canAddInput(self.deviceInput!) {
            self.captureSession.addInput(self.deviceInput!)
        }
        self.captureSession.startRunning()
        DispatchQueue.main.async {
            self.toggleCamerButton.isEnabled = true
        }
    } catch {
        print(error)
    }
}
I've revised it as much as I can. Switching cameras is essentially:
stopping the capture session,
removing the input,
adding the new input,
starting the session again.
I have no idea why it's so variable (it works perfectly most of the time on my iPhone X, while on my iPhone 6 it's always slow).

I suspect the calls that manipulate the capture session should be done on the main thread. Try changing updateCameras() to use DispatchQueue.main.async instead of DispatchQueue.global().async.
Like this:
func updateCameras() {
    DispatchQueue.main.async {
        self.captureSession.stopRunning()
        self.captureSession.removeInput(self.deviceInput!)
        self.currentCamera = self.cameraModeIsBack ? self.backCamera : self.frontCamera
        self.changeCameras()
    }
}
With that change, the call to DispatchQueue.main.async in your changeCameras() function should no longer be needed.
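As a side note (my addition, not part of the original answer): another common pattern is to swap the input inside a beginConfiguration()/commitConfiguration() pair without stopping the session at all, which avoids the full teardown and restart. A minimal sketch, assuming the same properties as above:

// Sketch (assumption, not from the original answer): swap inputs without stopping
// the session; the change is applied atomically at commitConfiguration().
func changeCamerasWithoutRestart() {
    guard let newCamera = cameraModeIsBack ? backCamera : frontCamera,
          let newInput = try? AVCaptureDeviceInput(device: newCamera) else { return }
    captureSession.beginConfiguration()
    if let oldInput = deviceInput {
        captureSession.removeInput(oldInput)
    }
    if captureSession.canAddInput(newInput) {
        captureSession.addInput(newInput)
        deviceInput = newInput
    }
    captureSession.commitConfiguration()
}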

'No active and enabled video connection' error when capturing photo with TrueDepth cam

I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following the guide from Apple about photo capturing and capturing depth):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }
    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                    for: .video, position: .unspecified)
        else { fatalError("No dual camera.") }
    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }
    self.captureSession!.beginConfiguration()
    // add video input
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
        self.captureSession!.canAddInput(videoDeviceInput)
        else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)
    // add video output
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }
    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
        else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()
    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled =
        self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
I solved it with the following implementation.
Any comments or remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                   for: .video, position: .unspecified)
                else { fatalError("No dual camera.") }
            videoDevice = vd
            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })
            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }

        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()
            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                captureSession!.canAddInput(videoDeviceInput)
                else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)
            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()
            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
                else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)
            // must be set after photoOutput is added to captureSession. Why???
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait
        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
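For context, a hypothetical usage sketch of this controller from a view controller that also acts as the AVCapturePhotoCaptureDelegate (the names CaptureViewController, cameraController and shutterTapped are my own, not from the original answer):

import AVFoundation
import UIKit

// Hypothetical usage (assumption): the view controller owns the controller and
// conforms to AVCapturePhotoCaptureDelegate to receive the depth-enabled photo.
class CaptureViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    let cameraController = CameraController()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { [weak self] error in
            guard let self = self, error == nil else { return }
            try? self.cameraController.displayPreview(on: self.view)
        }
    }

    @IBAction func shutterTapped(_ sender: Any) {
        cameraController.captureImage(delegate: self) { image, error in
            // Completion block stored by the controller; the actual result arrives
            // in the photoOutput(_:didFinishProcessingPhoto:error:) callback below.
        }
    }

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        print("Depth data: \(String(describing: photo.depthData))")
    }
}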

AVFoundation camera crashing while switching to front camera (refreshing camera)

I am a newbie to iOS, working on a simple application in Swift that needs a custom camera. I am using AVFoundation, but the app shows a black screen for a long time before it finally loads the camera. Here is my code:
func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSessionPresetPhoto
    if frontCam {
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .front).devices {
            captureDevice = availableDevices.first
            DispatchQueue(label: "prepare").async {
                self.beginSession()
            }
        }
    } else {
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }
}

@IBAction func switchCameraBtnClicked(_ sender: Any) {
    frontCam = !frontCam
    prepareCamera()
}

func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
            for input in inputs {
                captureSession.removeInput(input)
            }
        }
        if captureSession.inputs.isEmpty {
            captureSession.addInput(captureDeviceInput)
        }
    } catch {
        print(error.localizedDescription)
    }
    if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
        self.previewLayer = previewLayer
        // self.view.layer.addSublayer(self.previewLayer)
        self.view.layer.insertSublayer(self.previewLayer, at: 0)
        self.previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) {
            captureSession.addOutput(dataOutput)
        }
        captureSession.commitConfiguration()
        let queue = DispatchQueue(label: "com.graymatics.customcamera")
        dataOutput.setSampleBufferDelegate(self, queue: queue)
    }
}
Please correct me if my code is wrong.
I finally found the solution. Here is my code:
override func viewDidDisappear(_ animated: Bool) {
    self.stopCaptureSession()
}

func stopCaptureSession() {
    self.captureSession.stopRunning()
    if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
        for input in inputs {
            self.captureSession.removeInput(input)
        }
    }
}
The session needs to be stopped when navigating away from the current view controller.
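As a complementary note (my assumption, not part of the original answer): if the session and its inputs are torn down in viewDidDisappear, the camera typically has to be set up again when the view reappears, for example:

// Sketch (assumption): rebuild the camera when the view comes back, since the
// inputs were removed and the session was stopped in viewDidDisappear.
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    if !captureSession.isRunning {
        prepareCamera()
    }
}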
