iOS didOutputSampleBuffer drops first few frames

I am making an app which needs to record video and audio using AVCaptureVideoDataOutputSampleBufferDelegate.
The functions I use are:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)
{
    self.captureQueue.async {
        if !self.isCapturing
        {
            return
        }
        var isVideo = true
        if connection != self.videoConnection
        {
            isVideo = false
        }
        self.encoder!.encodeFrame(sampleBuffer: sampleBuffer, isVideo: isVideo)
    }
}
and the encodeFrame function:
func encodeFrame(sampleBuffer: CMSampleBuffer, isVideo: Bool)
{
    if (CMSampleBufferDataIsReady(sampleBuffer))
    {
        if self.writer.status == .unknown
        {
            print("INIT")
            let startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
            self.writer.startWriting()
            self.writer.startSession(atSourceTime: startTime)
        }
        if self.writer.status == .failed
        {
            print("writer failed : \(self.writer.error!)")
        }
        if isVideo
        {
            if self.writerVideoInput.isReadyForMoreMediaData
            {
                if self.writerVideoInput.append(sampleBuffer)
                {
                    print("writing video")
                }
                else
                {
                    print("failed to append video")
                }
            }
            else
            {
                print("video input data isn't ready")
            }
        }
        else
        {
            if self.writerAudioInput.isReadyForMoreMediaData
            {
                if self.writerAudioInput.append(sampleBuffer)
                {
                    print("writing audio")
                }
                else
                {
                    print("failed to append audio")
                }
            }
            else
            {
                print("audio input isn't ready")
            }
        }
    }
    else
    {
        print("sample buffer isn't ready")
    }
}
The problem is that when I start recording (setting the isCapturing flag to true), the first few frames get dropped (the drop reason is FrameWasLate). Apple's documentation says this happens when the sample buffer isn't released fast enough, but all I do is initialize the AVAssetWriter, nothing more.
I tried to put the encoding function on a serial queue, but it didn't work.
What's wrong?
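One pattern that often avoids these early FrameWasLate drops is to call startWriting() before flipping isCapturing, so the first delegate callbacks only append samples and release their buffers quickly. The following is a minimal sketch under assumptions, not the asker's actual fix: the didStartSession flag is new, the other property names merely mirror the code above.

import AVFoundation

// Minimal sketch (hypothetical): start the AVAssetWriter before letting frames
// through, so the capture callback only appends and releases buffers quickly.
// `didStartSession` is new; the other names mirror the question's code.
final class Encoder {
    let writer: AVAssetWriter
    let writerVideoInput: AVAssetWriterInput
    let writerAudioInput: AVAssetWriterInput
    private var didStartSession = false
    var isCapturing = false

    init(writer: AVAssetWriter, videoInput: AVAssetWriterInput, audioInput: AVAssetWriterInput) {
        self.writer = writer
        self.writerVideoInput = videoInput
        self.writerAudioInput = audioInput
    }

    // Call from the "start recording" action, not from the capture callback.
    func startRecording() {
        // startWriting() is comparatively slow; keep it out of
        // captureOutput(_:didOutputSampleBuffer:from:), which must return fast.
        guard writer.status == .unknown, writer.startWriting() else {
            print("could not start writer: \(String(describing: writer.error))")
            return
        }
        isCapturing = true    // only now let frames through
    }

    // Called (on the capture queue) for every sample buffer.
    func encodeFrame(sampleBuffer: CMSampleBuffer, isVideo: Bool) {
        guard isCapturing, CMSampleBufferDataIsReady(sampleBuffer) else { return }
        if !didStartSession {
            // Anchor the session to the first frame that is actually kept.
            writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
            didStartSession = true
        }
        let input = isVideo ? writerVideoInput : writerAudioInput
        if input.isReadyForMoreMediaData, !input.append(sampleBuffer) {
            print("failed to append \(isVideo ? "video" : "audio"): \(String(describing: writer.error))")
        }
    }
}

A related knob is AVCaptureVideoDataOutput.alwaysDiscardsLateVideoFrames, which defaults to true; that default is exactly what produces FrameWasLate drops whenever the delegate callback falls behind.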

Related

iOS Video recording with AVFoundation

I need to open the camera and record continuously. I also have a timer, and on each timer interval I should save a separate video file without stopping the recording process.
So I use AVFoundation, and in the timer action I call two functions (stopRecording, startRecording).
The timer interval is 4 seconds.
When I call the stopRecording method, the didFinishRecordingToOutputFileAtURL delegate method does not return the recorded file immediately; it returns about 3 seconds later, so I lose every second recording.
Is there any other way to organize this kind of process, or how can I fix this issue?
Thanks
func start(complition: (Error?, Bool)->()) {
    setupSession { success in
        if !success {
            print("Error!")
            return
        }
        setupPreview()
        startSession()
        let timeInterval: TimeInterval = 4
        timer = Timer.scheduledTimer(timeInterval: timeInterval, target: self, selector: #selector(timerAction), userInfo: nil, repeats: true)
    }
}
func setupSession(complition: (Bool)->()) {
    captureSession.beginConfiguration()
    guard let camera = AVCaptureDevice.default(for: .video) else {
        complition(false)
        return
    }
    guard let mic = AVCaptureDevice.default(for: .audio) else {
        complition(false)
        return
    }
    do {
        let videoInput = try AVCaptureDeviceInput(device: camera)
        let audioInput = try AVCaptureDeviceInput(device: mic)
        for input in [videoInput, audioInput] {
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
            }
        }
        activeInput = videoInput
    } catch {
        print("Error setting device input: \(error)")
        complition(false)
        return
    }
    captureSession.addOutput(movieOutput)
    captureSession.commitConfiguration()
    complition(true)
}
func setupPreview() {
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.frame = containerView.bounds
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    containerView.layer.addSublayer(previewLayer)
}
func startSession() {
    if !captureSession.isRunning {
        DispatchQueue.global(qos: .default).async { [weak self] in
            self?.captureSession.startRunning()
        }
    }
}
func stopSession() {
    if captureSession.isRunning {
        DispatchQueue.global(qos: .default).async { [weak self] in
            self?.captureSession.stopRunning()
        }
    }
}
public func captureMovie() {
    guard let connection = movieOutput.connection(with: .video) else {
        return
    }
    if connection.isVideoStabilizationSupported {
        connection.preferredVideoStabilizationMode = .auto
    }
    let device = activeInput.device
    if device.isSmoothAutoFocusSupported {
        do {
            try device.lockForConfiguration()
            device.isSmoothAutoFocusEnabled = true
            device.unlockForConfiguration()
        } catch {
            print("error: \(error)")
        }
    }
    guard let outUrl = tempURL else { return }
    movieOutput.startRecording(to: outUrl, recordingDelegate: self)
}
public func stopRecording() {
    if movieOutput.isRecording {
        movieOutput.stopRecording()
    }
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    print(Date.now, " ", "file")
    if let error = error {
        print("error: \(error.localizedDescription)")
    } else {
        // Save the source
    }
}
@objc private func timerAction() {
    print(Date.now, " timerAction")
    stopRecording()
    captureMovie()
}
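A possible workaround for the 3-second gap described above (a hedged sketch, not a verified fix; shouldKeepRecording is a hypothetical flag owned by the caller): let each segment end on its own, for example via movieOutput.maxRecordedDuration, and start the next segment from the delegate callback once the previous file has actually been finalized, instead of pairing stopRecording()/captureMovie() inside the Timer.

// Hypothetical sketch: drive segment rollover from the delegate, not a Timer.
// Somewhere during setup, let the output stop itself after ~4 seconds:
// movieOutput.maxRecordedDuration = CMTime(seconds: 4, preferredTimescale: 600)

func fileOutput(_ output: AVCaptureFileOutput,
                didFinishRecordingTo outputFileURL: URL,
                from connections: [AVCaptureConnection],
                error: Error?) {
    if let error = error {
        // maximumDurationReached arrives as an error, but the file is still usable.
        print("segment finished with error: \(error.localizedDescription)")
    }
    // save outputFileURL here
    if shouldKeepRecording {      // hypothetical flag owned by the caller
        captureMovie()            // immediately begin the next segment
    }
}

If truly gap-free segments are required, an AVAssetWriter pipeline (as in the first question on this page) is the usual alternative, because AVCaptureMovieFileOutput always spends some time finalizing each file before the delegate fires.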

captureOutput not being called from delegate

So I realize there are several questions about this, but I feel like I've reviewed them all and still haven't figured out what I did wrong or differently. I call this class from a function that I know happens after viewDidLoad. What I am trying to do is take a video stream from a given AVCaptureDevice or camera id and pass it into a WebView as a series of evaluateJavaScript calls. I may need to optimize this later, but I'm having trouble just getting captureOutput to be called. Certainly I don't need a preview to be able to capture the output, do I? I've confirmed the permissions are correct and the code reaches the point where the sample buffer delegate is being set on the capture session. Any ideas?
class CameraPlugin:
    NSObject,
    AVCaptureVideoDataOutputSampleBufferDelegate,
    AVCaptureMetadataOutputObjectsDelegate,
    AVCapturePhotoCaptureDelegate {
    private var capturePhotoCompletion: ((Result<UIImage, Error>) -> ())?
    private var scanBarcodeCompletion: ((Result<String, Error>) -> ())?
    let captureSession = AVCaptureSession()
    private var videoSampleListener: VideoSampleListener?
    func startStreamingCamera(cameraId: String?, camera: AVCaptureDevice?, listener: VideoSampleListener) {
        self.videoSampleListener = listener
        var inputCam = camera
        if (cameraId != nil) {
            inputCam = self.retrieveVideoCaptureDeviceFromId(id: cameraId!)
            if (inputCam == nil) {
                return
            }
        } else if (inputCam == nil) {
            return
        }
        self.haveCaptureDeviceAccess(type: .video) { granted in
            if granted {
                do {
                    let captureDeviceInput = try AVCaptureDeviceInput(device: inputCam!)
                    let captureDeviceOutput = AVCaptureVideoDataOutput()
                    guard
                        self.captureSession.canAddInput(captureDeviceInput),
                        self.captureSession.canAddOutput(captureDeviceOutput)
                    else {
                        return
                    }
                    self.captureSession.addInput(captureDeviceInput)
                    self.captureSession.addOutput(captureDeviceOutput)
                    captureDeviceOutput.setSampleBufferDelegate(self, queue: .global())
                } catch {
                }
            } else {
            }
        }
    }
    /*
     * Converts captured video frame to a jpeg image
     */
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        guard let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent) else {
            return
        }
        let image = UIImage(cgImage: cgImage, scale: 1.0, orientation: .right)
        guard let imageData = image.jpegData(compressionQuality: 0.7)?.base64EncodedString() else {
            return
        }
        if (videoSampleListener != nil) {
            videoSampleListener!.receivedVideoSample(imageData: imageData)
        }
    }
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    }
    private func configurePhotoSettings() -> AVCapturePhotoSettings {
        let settings = AVCapturePhotoSettings()
        settings.isHighResolutionPhotoEnabled = true
        return settings
    }
    private func retrieveBarcodeMetadataObjectTypes() -> [AVMetadataObject.ObjectType] {
        return [
            AVMetadataObject.ObjectType.upce,
            AVMetadataObject.ObjectType.code39,
            AVMetadataObject.ObjectType.code39Mod43,
            AVMetadataObject.ObjectType.ean13,
            AVMetadataObject.ObjectType.ean8,
            AVMetadataObject.ObjectType.code93,
            AVMetadataObject.ObjectType.code128,
            AVMetadataObject.ObjectType.pdf417,
            AVMetadataObject.ObjectType.qr,
            AVMetadataObject.ObjectType.aztec,
            AVMetadataObject.ObjectType.interleaved2of5,
            AVMetadataObject.ObjectType.itf14,
            AVMetadataObject.ObjectType.dataMatrix
        ]
    }
    private func haveCaptureDeviceAccess(type: AVMediaType, completion: @escaping (Bool) -> ()) {
        switch AVCaptureDevice.authorizationStatus(for: type) {
        case .denied:
            completion(false)
        case .notDetermined:
            AVCaptureDevice.requestAccess(for: type) { granted in
                completion(granted)
            }
        default:
            completion(true)
        }
    }
    func retrieveVideoCaptureDeviceFromId(id: String) -> AVCaptureDevice? {
        return self.retrieveAvailableVideoCaptureDevices().first(where: { device in device.uniqueID == id })
    }
    func retrieveAvailableVideoCaptureDevices() -> [AVCaptureDevice] {
        let discoverySession = AVCaptureDevice.DiscoverySession(
            deviceTypes: self.retrievePlatformDeviceTypes(),
            mediaType: .video,
            position: .unspecified
        )
        return discoverySession.devices
    }
    private func retrievePlatformDeviceTypes() -> [AVCaptureDevice.DeviceType] {
        var deviceTypes: [AVCaptureDevice.DeviceType] = [
            .builtInDualCamera,
            .builtInWideAngleCamera,
            .builtInTelephotoCamera
        ]
        if #available(iOS 11.1, *) {
            deviceTypes += [
                .builtInTrueDepthCamera
            ]
        }
        if #available(iOS 13.0, *) {
            deviceTypes += [
                .builtInDualWideCamera,
                .builtInTripleCamera,
                .builtInUltraWideCamera
            ]
        }
        return deviceTypes
    }
}
The calling code:
guard let videoCaptureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
    return
}
let camPlugin = CameraPlugin()
camPlugin.startStreamingCamera(cameraId: nil, camera: videoCaptureDevice, listener: self)
Update: There is actually nothing wrong with the code here. The issue I was having was that the instance of the class described above was being deallocated: nothing held a strong reference to it, so ARC released it and the delegate callbacks never fired.
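For illustration, a minimal sketch of that fix (hypothetical host class; VideoSampleListener is the asker's own protocol and is assumed to declare receivedVideoSample(imageData:); the session is assumed to be started elsewhere): keep the plugin in a stored property so it is retained for as long as frames are expected, instead of letting it die as a local variable.

import UIKit
import AVFoundation

// Hypothetical sketch: a strong reference keeps CameraPlugin alive, so its
// sample-buffer delegate callbacks keep firing. As a local variable it would
// be deallocated as soon as the calling function returns.
final class CameraHostViewController: UIViewController, VideoSampleListener {
    private var cameraPlugin: CameraPlugin?   // retained for the stream's lifetime

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        guard let device = AVCaptureDevice.default(for: .video) else { return }
        let plugin = CameraPlugin()
        cameraPlugin = plugin                  // retain before starting the stream
        plugin.startStreamingCamera(cameraId: nil, camera: device, listener: self)
    }

    func receivedVideoSample(imageData: String) {
        // forward the base64 JPEG to the web view, etc.
    }
}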

iOS 11: How can I use the Vision framework to track a face across video?

I can track an object across video, but I can't track a face.
When I use the camera to track a face, the code prints [].
extension FaceTrackingViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let request = VNDetectFaceLandmarksRequest { [unowned self] request, error in
            if let error = error {
                self.presentAlertController(withTitle: self.title,
                                            message: error.localizedDescription)
            }
            else {
                print("\(request.results!)")
            }
        }
        do {
            try handler.perform([request], on: pixelBuffer!)
        }
        catch {
            print(error)
        }
    }
}
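For actual tracking (rather than per-frame detection), here is a minimal hedged sketch: detect a face once with VNDetectFaceRectanglesRequest, then keep feeding the resulting observation into a VNTrackObjectRequest through a single, reused VNSequenceRequestHandler. The FaceTracker class and the 0.3 confidence threshold are illustrative, not from the question; the empty results above may also simply be a detection failure (image orientation is a common culprit).

import Vision
import CoreVideo

// Hypothetical sketch (iOS 11+): detect a face once, then track it frame to frame.
final class FaceTracker {
    private let sequenceHandler = VNSequenceRequestHandler()
    private var lastObservation: VNDetectedObjectObservation?

    // Call with the pixel buffer from each captureOutput callback.
    func process(_ pixelBuffer: CVPixelBuffer) {
        if let observation = lastObservation {
            // Subsequent frames: track the previously detected face.
            let request = VNTrackObjectRequest(detectedObjectObservation: observation)
            request.trackingLevel = .accurate
            try? sequenceHandler.perform([request], on: pixelBuffer)
            if let updated = request.results?.first as? VNDetectedObjectObservation,
               updated.confidence > 0.3 {
                lastObservation = updated
                print("tracked face at \(updated.boundingBox)")
            } else {
                lastObservation = nil   // lost the face; re-detect on the next frame
            }
        } else {
            // First frame (or after losing the face): detect a face to seed the tracker.
            let detect = VNDetectFaceRectanglesRequest()
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([detect])
            lastObservation = detect.results?.first as? VNFaceObservation
        }
    }
}

In this sketch, process(_:) would be called with the pixel buffer extracted in each captureOutput callback.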

No Video Output and [MC] Reading from public effective user settings. Error in Swift/iOS 11

Using Xcode 9 Beta for iOS 11:
I've followed a walkthrough on how to extract frames from an AVCaptureSession, but have not been able to get the capture to appear. While I have included the camera permissions in the info.plist file, the app seems to stall after opening and I get the following errors:
[App Name] does not have sandbox access for frZQaeyWLUvLjeuEK43hmg and IS NOT appropriately entitled
[MC] System group container for systemgroup.com.apple.configurationprofiles path is /private/var/containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
[MC] Reading from public effective user settings.
Here is the code for FrameExtractor.swift for reference:
import UIKit
import AVFoundation

protocol FrameExtractorDelegate: class {
    func captured(image: UIImage)
}

class FrameExtractor: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let position = AVCaptureDevice.Position.front
    private let quality = AVCaptureSession.Preset.medium
    private var permissionGranted = false
    private let sessionQueue = DispatchQueue(label: "session queue")
    private let captureSession = AVCaptureSession()
    private let context = CIContext()
    weak var delegate: FrameExtractorDelegate?

    override init() {
        super.init()
        checkPermission()
        sessionQueue.async { [unowned self] in
            self.configureSession()
            self.captureSession.startRunning()
        }
    }

    // MARK: AVSession configuration
    private func checkPermission() {
        switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
        case .authorized:
            permissionGranted = true
        case .notDetermined:
            requestPermission()
        default:
            permissionGranted = false
        }
    }

    private func requestPermission() {
        sessionQueue.suspend()
        AVCaptureDevice.requestAccess(for: AVMediaType.video) { [unowned self] granted in
            self.permissionGranted = granted
            self.sessionQueue.resume()
        }
    }

    private func configureSession() {
        guard permissionGranted else { return }
        captureSession.sessionPreset = quality
        guard let captureDevice = selectCaptureDevice() else { return }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        guard captureSession.canAddInput(captureDeviceInput) else { return }
        captureSession.addInput(captureDeviceInput)
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
        guard captureSession.canAddOutput(videoOutput) else { return }
        captureSession.addOutput(videoOutput)
        guard let connection = videoOutput.connection(with: AVFoundation.AVMediaType.video) else { return }
        guard connection.isVideoOrientationSupported else { return }
        guard connection.isVideoMirroringSupported else { return }
        connection.videoOrientation = .portrait
        connection.isVideoMirrored = position == .front
    }

    private func selectCaptureDevice() -> AVCaptureDevice? {
        return AVCaptureDevice.default(for: AVMediaType.video)
        // return AVCaptureDevice.devices().filter {
        //     ($0 as AnyObject).hasMediaType(AVMediaType.video) &&
        //     ($0 as AnyObject).position == position
        // }.first
    }

    // MARK: Sample buffer to UIImage conversion
    private func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
        return UIImage(cgImage: cgImage)
    }

    // MARK: AVCaptureVideoDataOutputSampleBufferDelegate
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Got a Frame!")
        guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
        DispatchQueue.main.async { [unowned self] in
            self.delegate?.captured(image: uiImage)
        }
    }
}
And for ViewController.swift:
import UIKit

class ViewController: UIViewController, FrameExtractorDelegate {
    @IBOutlet var imageView: UIImageView!
    var frameExtractor: FrameExtractor!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        frameExtractor = FrameExtractor()
        frameExtractor.delegate = self
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captured(image: UIImage) {
        imageView.image = image
    }
}
The issue is the captureOutput method signature. This is the new signature in iOS 11 for captureOutput in AVCaptureVideoDataOutputSampleBufferDelegate:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
    DispatchQueue.main.async { [unowned self] in
        self.delegate?.captured(image: uiImage)
    }
}
Notice the change between "didOutput sampleBuffer:" and "didOutputSampleBuffer sampleBuffer:"

How to get an AVAssetReader to loop?

Hi I have been trying to figure out how to implement movie looping in GPUImage2, but have been unsuccessful so far. The MovieInput class in GPUImage2 uses AVAssetReader to playback the movie files, so I researched ways to loop AVAssetReader. I found this question on StackOverFlow dealing with this topic. AVFoundation to reproduce a video loop
The best answer was
"AVAssetReader doesn't support seeking or restarting, it is essentially a sequential decoder. You have to create a new AVAssetReader object to read the same samples again."
I tried to figure out how to connect the old assetReader to a new one, but I was not very successful and it crashed every time.
I was recommended to try something like this, but I am not exactly sure how to write the function generateAssetReader.
public func start() {
    self.assetReader = generateAssetReader(asset: asset, readAudio: readAudio, videoOutputSettings: videoOutputSettings, audioOutputSettings: audioOutputSettings)
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard (self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded) else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if (output.mediaType == AVMediaTypeVideo) {
                    readerVideoTrackOutput = output
                }
            }
            while (self.assetReader.status == .reading) {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if (self.assetReader.status == .completed) {
                self.assetReader.cancelReading()
                self.assetReader = nil
                if self.loop {
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}
Would anyone have a clue how to solve this looping problem? This is a link to the entire code of the MovieInput class:
https://github.com/BradLarson/GPUImage2/blob/master/framework/Source/iOS/MovieInput.swift
I found the answer in case anyone is wondering.
public func createReader() -> AVAssetReader
{
    var assetRead: AVAssetReader!
    do {
        assetRead = try AVAssetReader.init(asset: self.asset)
        let outputSettings: [String: AnyObject] = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: Int32(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange))]
        let readerVideoTrackOutput = AVAssetReaderTrackOutput(track: self.asset.tracks(withMediaType: AVMediaTypeVideo)[0], outputSettings: outputSettings)
        readerVideoTrackOutput.alwaysCopiesSampleData = false
        assetRead.add(readerVideoTrackOutput)
    } catch {
    }
    return assetRead
}
public func start() {
    self.assetReader = createReader()
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard (self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded) else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if (output.mediaType == AVMediaTypeVideo) {
                    readerVideoTrackOutput = output
                }
            }
            while (self.assetReader.status == .reading) {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if (self.assetReader.status == .completed) {
                self.assetReader.cancelReading()
                if (self.loop) {
                    // TODO: Restart movie processing
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}
