CIContext initialization crash - iOS

Background:
I'm running a Swift 2 application that offers the following two sign-in options.
Option A:
The user can enter a number to sign in. In this case, his/her picture is shown in a UIImageView.
Option B:
The user can use an NFC tag to sign in. In this case, the UIImageView is replaced with a camera layer that shows a live camera stream and uses CIContext to capture an image on a button press.
Problem:
The issue I'm facing is that sometimes, when I choose option A (not using the camera layer), the app crashes. Since I'm unable to reproduce the crash deterministically, I've hit a dead end trying to understand why the app is crashing.
EDIT: The camera layer is used in both options but is hidden in option A.
Crashlytics generates the following crash log:
0 libswiftCore.dylib specialized _fatalErrorMessage(StaticString, StaticString, StaticString, UInt) -> () + 44
1 CameraLayerView.swift line 20 CameraLayerView.init(coder : NSCoder) -> CameraLayerView?
2 CameraLayerView.swift line 0 #objc CameraLayerView.init(coder : NSCoder) -> CameraLayerView?
3 UIKit -[UIClassSwapper initWithCoder:] + 248
32 UIKit UIApplicationMain + 208
33 AppDelegate.swift line 17 main
34 libdispatch.dylib (Missing)
I've checked line #20 in CameraLayerView, but it is just an initialization statement:
private let ciContext = CIContext(EAGLContext: EAGLContext(API: .OpenGLES2))
Mentioned below is the CameraLayerView file. Any help would be appreciated.
import UIKit
import AVFoundation
class CameraLayerView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession = AVCaptureSession()
var sessionOutput = AVCaptureVideoDataOutput()
var previewLayer = AVCaptureVideoPreviewLayer()
private var pixelBuffer : CVImageBuffer!
private var attachments : CFDictionary!
private var ciImage : CIImage!
private let ciContext = CIContext(EAGLContext: EAGLContext(API: .OpenGLES2))
private var imageOptions : [String : AnyObject]!
var faceFound = false
var image : UIImage!
override func layoutSubviews() {
previewLayer.position = CGPoint(x: self.frame.width/2, y: self.frame.height/2)
previewLayer.bounds = self.frame
self.layer.borderWidth = 2.0
self.layer.borderColor = UIColor.redColor().CGColor
}
func loadCamera() {
let camera = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
for device in camera {
if device.position == .Front {
do{
for input in captureSession.inputs {
captureSession.removeInput(input as! AVCaptureInput)
}
for output in captureSession.outputs {
captureSession.removeOutput(output as! AVCaptureOutput)
}
previewLayer.removeFromSuperlayer()
previewLayer.session = nil
let input = try AVCaptureDeviceInput(device: device as! AVCaptureDevice)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
sessionOutput.videoSettings = [String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA)]
sessionOutput.setSampleBufferDelegate(self, queue: dispatch_get_global_queue(Int(QOS_CLASS_BACKGROUND.rawValue), 0))
sessionOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(sessionOutput) {
captureSession.addOutput(sessionOutput)
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
captureSession.startRunning()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
switch UIDevice.currentDevice().orientation {
case .Portrait:
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.Portrait
case .PortraitUpsideDown:
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.PortraitUpsideDown
case .LandscapeLeft:
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
case .LandscapeRight:
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.LandscapeLeft
default:
break
}
self.layer.addSublayer(previewLayer)
}
}
} catch {
print("Error")
}
}
}
}
func takePicture() -> UIImage {
self.previewLayer.removeFromSuperlayer()
self.captureSession.stopRunning()
return image
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
ciImage = CIImage(CVPixelBuffer: pixelBuffer!, options: attachments as? [String : AnyObject])
if UIDevice.currentDevice().orientation == .PortraitUpsideDown {
imageOptions = [CIDetectorImageOrientation : 8]
} else if UIDevice.currentDevice().orientation == .LandscapeLeft {
imageOptions = [CIDetectorImageOrientation : 3]
} else if UIDevice.currentDevice().orientation == .LandscapeRight {
imageOptions = [CIDetectorImageOrientation : 1]
} else {
imageOptions = [CIDetectorImageOrientation : 6]
}
let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: ciContext, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
let features = faceDetector.featuresInImage(ciImage, options: imageOptions)
if features.count == 0 {
if faceFound == true {
faceFound = false
dispatch_async(dispatch_get_main_queue()) {
self.layer.borderColor = UIColor.redColor().CGColor
}
}
} else {
if UIDevice.currentDevice().orientation == .PortraitUpsideDown {
image = UIImage(CGImage: ciContext.createCGImage(ciImage, fromRect: ciImage.extent), scale: 1.0, orientation: UIImageOrientation.Left)
} else if UIDevice.currentDevice().orientation == .LandscapeLeft {
image = UIImage(CGImage: ciContext.createCGImage(ciImage, fromRect: ciImage.extent), scale: 1.0, orientation: UIImageOrientation.Down)
} else if UIDevice.currentDevice().orientation == .LandscapeRight {
image = UIImage(CGImage: ciContext.createCGImage(ciImage, fromRect: ciImage.extent), scale: 1.0, orientation: UIImageOrientation.Up)
} else {
image = UIImage(CGImage: ciContext.createCGImage(ciImage, fromRect: ciImage.extent), scale: 1.0, orientation: UIImageOrientation.Right)
}
if faceFound == false {
faceFound = true
for feature in features {
if feature.isKindOfClass(CIFaceFeature) {
dispatch_async(dispatch_get_main_queue()) {
self.layer.borderColor = UIColor.greenColor().CGColor
}
}
}
}
}
}
}

I tested a theory and it worked. Since ciContext was being initialised during view initialisation, it seemed like the app was crashing due to a race condition. I moved the initialisation of ciContext into my loadCamera method and it hasn't crashed since.
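In code, that change amounts to something like the following (one way to do it, using an implicitly unwrapped optional for the property):
private var ciContext: CIContext!

func loadCamera() {
    // create the context here instead of in the property initialiser, which runs during init(coder:)
    ciContext = CIContext(EAGLContext: EAGLContext(API: .OpenGLES2))
    // ... rest of loadCamera unchanged
}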
UPDATE
Another thing I noticed was that, in various tutorials and blog posts on the internet, the statement let ciContext = CIContext(EAGLContext: EAGLContext(API: .OpenGLES2)) was split into two separate statements, so that it became
let eaglContext = EAGLContext(API: .OpenGLES2)
let ciContext = CIContext(EAGLContext: eaglContext)
I still don't know what exactly was causing the app to crash in the first place, but these two changes seem to have fixed the problem.
CORRECT ANSWER
Finally found the culprit. In my viewController that used ciContext, I had a timer that was never invalidated and therefore kept a strong reference to the viewController. On every subsequent visit, a new viewController would be created while the previous one was never released from memory. This caused memory usage to build up over time. Once it passed a certain threshold, the ciContext initialiser would return nil because of low memory, which would in turn crash the app.
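For reference, the fix boils down to invalidating the timer when the view controller goes off screen so it can be deallocated. A minimal sketch (property and selector names are illustrative, not my actual code):
private var refreshTimer: NSTimer?

override func viewDidAppear(animated: Bool) {
    super.viewDidAppear(animated)
    // NSTimer keeps a strong reference to its target, so the view controller stays alive while the timer is scheduled
    refreshTimer = NSTimer.scheduledTimerWithTimeInterval(1.0, target: self, selector: #selector(timerFired), userInfo: nil, repeats: true)
}

override func viewWillDisappear(animated: Bool) {
    super.viewWillDisappear(animated)
    // invalidating releases that strong reference, letting the view controller (and its CIContext) be freed
    refreshTimer?.invalidate()
    refreshTimer = nil
}

func timerFired() {
    // periodic work
}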

Related

iOS: Capture image during camera preview without action

I'm trying to capture an image from the camera preview but can't get an image from the preview layer. What I want to do is similar to the iOS 15 OCR mode in the Photos app, which processes the image during the camera preview; it doesn't require the user to take a shot or start recording video, it just processes the image in the preview. I looked into the docs and searched the net but could not find any useful info.
What I tried was saving the previewLayer and calling previewLayer.draw(in: context) periodically, but the image drawn in the context is blank. Now I wonder whether this is possible at all.
There might be some security restriction that limits processing of the camera preview image to system apps, I guess, so I may need to find another way.
Please enlighten me if there is any workaround.
Thanks!
Ok. With MadProgrammer's help I got things working properly. Anurag Ajwani's site is very helpful.
Here is my simple snippet to capture video frames. You need to ensure permissions before CameraView gets instantiated (a minimal permission check is sketched after the snippet).
class VideoCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
//private var previewLayer: AVCaptureVideoPreviewLayer? = nil
private var session: AVCaptureSession? = nil
private var videoOutput: AVCaptureVideoDataOutput? = nil
private var videoHandler: ((UIImage) -> Void)?
override init() {
super.init()
let deviceSession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualWideCamera, .builtInWideAngleCamera], mediaType: .video, position: .back)
guard deviceSession.devices.count > 0 else { return }
if let input = try? AVCaptureDeviceInput(device: deviceSession.devices.first!) {
let session = AVCaptureSession()
session.addInput(input)
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String:Any]
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "my.image.handling.queue"))
videoOutput.alwaysDiscardsLateVideoFrames = true
if session.canAddOutput(videoOutput) {
session.sessionPreset = .high
session.addOutput(videoOutput)
self.videoOutput = videoOutput
}
for connection in videoOutput.connections {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
}
session.commitConfiguration()
self.session = session
/*
self.previewLayer = AVCaptureVideoPreviewLayer(session: session)
if let previewLayer = self.previewLayer {
previewLayer.videoGravity = .resizeAspectFill
layer.insertSublayer(previewLayer, at: 0)
CameraPreviewView.initialized = true
}
*/
}
}
func startCapturing(_ videoHandler: @escaping (UIImage) -> Void) -> Void {
if let session = session {
session.startRunning()
}
self.videoHandler = videoHandler
}
// AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get video frame")
return
}
//print("got video frame")
if let videoHandler = self.videoHandler {
let rect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(imageBuffer), height: CVPixelBufferGetHeight(imageBuffer))
let ciImage = CIImage.init(cvImageBuffer: imageBuffer)
let ciContext = CIContext()
let cgImage = ciContext.createCGImage(ciImage, from: rect)
guard cgImage != nil else {return }
let uiImage = UIImage(cgImage: cgImage!)
videoHandler(uiImage)
}
}
}
struct CameraView: View {
@State var capturedVideo: UIImage? = nil
let videoCapture = VideoCapture()
var body: some View {
VStack {
ZStack(alignment: .center) {
if let capturedVideo = self.capturedVideo {
Image(uiImage: capturedVideo)
.resizable()
.scaledToFill()
}
}
}
.background(Color.black)
.onAppear {
self.videoCapture.startCapturing { uiImage in
self.capturedVideo = uiImage
}
}
}
}
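The permission check mentioned above isn't part of the snippet; a minimal way to gate the camera setup on access (my own sketch, not from the answer) could be:
import AVFoundation

func ensureCameraPermission(_ completion: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        completion(true)
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { completion(granted) }
        }
    default:
        // .denied or .restricted
        completion(false)
    }
}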

Google Face Detection crashing when converting to image and trying to detect face

I am creating a custom camera with filters. When I add the following code, it crashes without throwing any exception.
//Setting video output
func setupBuffer() {
videoBuffer = AVCaptureVideoDataOutput()
videoBuffer?.alwaysDiscardsLateVideoFrames = true
videoBuffer?.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32RGBA)]
videoBuffer?.setSampleBufferDelegate(self, queue: DispatchQueue.main)
captureSession?.addOutput(videoBuffer)
}
public func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if connection.videoOrientation != .portrait {
connection.videoOrientation = .portrait
}
guard let image = GMVUtility.sampleBufferTo32RGBA(sampleBuffer) else {
print("No Image 😂")
return
}
pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
ciImage = CIImage(cvImageBuffer: pixelBuffer!, options: CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate) as! [String : Any]?)
CameraView.filter = CIFilter(name: "CIPhotoEffectProcess")
CameraView.filter?.setValue(ciImage, forKey: kCIInputImageKey)
let cgimg = CameraView.context.createCGImage(CameraView.filter!.outputImage!, from: ciImage.extent)
DispatchQueue.main.async {
self.preview.image = UIImage(cgImage: cgimg!)
}
}
But it's crashing on:
guard let image = GMVUtility.sampleBufferTo32RGBA(sampleBuffer) else {
print("No Image 😂")
return
}
When I pass an image created from the CIImage, it doesn't recognize the face in the image.
Complete code file is https://www.dropbox.com/s/y1ewd1sh18h3ezj/CameraView.swift.zip?dl=0
1) Create a separate queue for the buffer.
fileprivate var videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
2) Set up the buffer with this:
videoBuffer = AVCaptureVideoDataOutput()
videoBuffer?.alwaysDiscardsLateVideoFrames = true
videoBuffer?.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
videoBuffer?.setSampleBufferDelegate(self, queue: videoDataOutputQueue) // deliver frames on the dedicated serial queue instead of the main queue
captureSession?.addOutput(videoBuffer)
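Applying both changes to the setupBuffer function from the question gives roughly the following (property names taken from the question; this is a sketch, not tested code):
fileprivate var videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")

func setupBuffer() {
    videoBuffer = AVCaptureVideoDataOutput()
    videoBuffer?.alwaysDiscardsLateVideoFrames = true
    // 32BGRA instead of 32RGBA, and a dedicated serial queue instead of DispatchQueue.main
    videoBuffer?.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
    videoBuffer?.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
    if let videoBuffer = videoBuffer {
        captureSession?.addOutput(videoBuffer)
    }
}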

Camera only takes pictures in portrait mode swift

How can you set a camera into landscape mode? Every time I take a photo, the image gets saved as a portrait image. When the device is in landscape mode the photo looks fine, but when I view it in the camera roll it is still in portrait.
This is my take photo function:
// take a photo
@IBAction func takePhoto(sender: AnyObject) {
self.fullScreenView.hidden = false
self.recordButton.enabled = false
self.takephoto.enabled = false
self.recordButton.hidden = true
self.takephoto.hidden = true
session.startRunning()
// customize the quality level or bitrate of the output photo
session.sessionPreset = AVCaptureSessionPresetPhoto
// add the AVCaptureVideoPreviewLayer to the view and set the view in fullscreen
fullScreenView.frame = view.bounds
videoPreviewLayer.frame = fullScreenView.bounds
fullScreenView.layer.addSublayer(videoPreviewLayer)
// add action to fullScreenView
gestureFullScreenView = UITapGestureRecognizer(target: self, action: #selector(ViewController.takePhoto(_:)))
self.fullScreenView.addGestureRecognizer(gestureFullScreenView)
// add action to myView
gestureView = UITapGestureRecognizer(target: self, action: #selector(ViewController.setFrontpage(_:)))
self.view.addGestureRecognizer(gestureView)
if (preview == true) {
if let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo) {
// code for photo capture goes here...
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
// process the image data (sampleBuffer) here to get an image file we can put in our view
if (sampleBuffer != nil) {
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let image = UIImage(data: imageData, scale: 1.0)
self.fullScreenView.hidden = true
self.fullScreenView.gestureRecognizers?.forEach(self.fullScreenView.removeGestureRecognizer)
self.session.stopRunning()
// save image to the library
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
self.imageViewBackground = UIImageView(frame: self.view.bounds)
self.imageViewBackground.image = image
self.imageViewBackground.tag = self.key
self.view.addSubview(self.imageViewBackground)
}
})
}
}
else {
preview = true
}
}
My preview looks like this, and that's OK:
http://img5.fotos-hochladen.net/uploads/bildschirmfotom4s7diaehy.png
but in the end it looks like this:
http://img5.fotos-hochladen.net/uploads/bildschirmfoto3c2rlwtevf.png
Thanks in advance!
Your videoConnection's orientation is always portrait, no matter whether the device is in portrait or landscape, so you should adjust the videoConnection's orientation to the correct one before taking the still photo.
Add the following method to get the videoOrientation for the current deviceOrientation:
func videoOrientation(for deviceOrientation: UIDeviceOrientation) -> AVCaptureVideoOrientation {
switch deviceOrientation {
case UIDeviceOrientation.portrait:
return AVCaptureVideoOrientation.portrait
case UIDeviceOrientation.landscapeLeft:
return AVCaptureVideoOrientation.landscapeRight
case UIDeviceOrientation.landscapeRight:
return AVCaptureVideoOrientation.landscapeLeft
case UIDeviceOrientation.portraitUpsideDown:
return AVCaptureVideoOrientation.portraitUpsideDown
default:
return AVCaptureVideoOrientation.portrait
}
}
Right after the following line
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
Add the following line
videoConnection.videoOrientation = videoOrientation(for: UIDevice.current.orientation)
Note: If your app supports only portrait or only landscape mode, this issue still happens because UIDevice.current.orientation will always return the supported orientation. To overcome this, you can use CoreMotion to detect the device orientation and then pass it to the videoOrientation(for:) method.
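A rough sketch of that CoreMotion approach, inside the view controller that also defines videoOrientation(for:) (my own illustration, not part of the original answer; verify the sign conventions on a device):
import CoreMotion

let motionManager = CMMotionManager()
var latestDeviceOrientation: UIDeviceOrientation = .portrait

func startDeviceOrientationUpdates() {
    motionManager.accelerometerUpdateInterval = 0.2
    motionManager.startAccelerometerUpdates(to: .main) { [weak self] data, _ in
        guard let a = data?.acceleration else { return }
        // derive a coarse orientation from gravity, independent of UIDevice.current.orientation
        if abs(a.y) >= abs(a.x) {
            self?.latestDeviceOrientation = a.y < 0 ? .portrait : .portraitUpsideDown
        } else {
            self?.latestDeviceOrientation = a.x < 0 ? .landscapeLeft : .landscapeRight
        }
    }
}

// then, before capturing:
// videoConnection.videoOrientation = videoOrientation(for: latestDeviceOrientation)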
Hope this helps :)
I got the answer from @Yodagama, but he didn't add how to detect the orientation.
if let photoOutputConnection = self.photoOutput.connection(with: .video) {
// USE the below function HERE
photoOutputConnection.videoOrientation = videoOrientation()
}
photoOutput.capturePhoto(with: settings, delegate: self)
Function to detect the device orientation:
func videoOrientation() -> AVCaptureVideoOrientation {
var videoOrientation: AVCaptureVideoOrientation!
let orientation: UIDeviceOrientation = UIDevice.current.orientation
switch orientation {
case .faceUp, .faceDown, .unknown:
// let interfaceOrientation = UIApplication.shared.statusBarOrientation
if let interfaceOrientation = UIApplication.shared.windows.first(where: { $0.isKeyWindow })?.windowScene?.interfaceOrientation {
switch interfaceOrientation {
case .portrait, .portraitUpsideDown, .unknown:
videoOrientation = .portrait
case .landscapeLeft:
videoOrientation = .landscapeRight
case .landscapeRight:
videoOrientation = .landscapeLeft
@unknown default:
videoOrientation = .portrait
}
}
case .portrait, .portraitUpsideDown:
videoOrientation = .portrait
case .landscapeLeft:
videoOrientation = .landscapeRight
case .landscapeRight:
videoOrientation = .landscapeLeft
@unknown default:
videoOrientation = .portrait
}
return videoOrientation
}
Possible Solution: Saving Image as JPEG instead of PNG
This occurs because PNGs do not store orientation information. Save the photo as a JPG instead and it will be oriented correctly.
Use this code to convert your image to JPG immediately after taking the image (the second line is the operative one here):
let image = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let imageData:NSData = UIImageJPEGRepresentation(image, 0.9)! // 0.9 is compression value: 0.0 is most compressed/lowest quality and 1.0 is least compressed/highest quality
Source + more info: https://stackoverflow.com/a/34796890/5700898
If that doesn't work
I've edited your code in a couple of places; see if the code below now works as you hope:
// initialize saving photo capabilities
func image(image: UIImage, didFinishSavingWithError error: NSError?, contextInfo:UnsafePointer<Void>) {
if error == nil {
print("image saved")
} else {
print("save error: \(error?.localizedDescription)")
}
}
// take a photo
@IBAction func takePhoto(sender: AnyObject) {
self.fullScreenView.hidden = false
self.recordButton.enabled = false
self.takephoto.enabled = false
self.recordButton.hidden = true
self.takephoto.hidden = true
session.startRunning()
// customize the quality level or bitrate of the output photo
session.sessionPreset = AVCaptureSessionPresetPhoto
// add the AVCaptureVideoPreviewLayer to the view and set the view in fullscreen
fullScreenView.frame = view.bounds
videoPreviewLayer.frame = fullScreenView.bounds
fullScreenView.layer.addSublayer(videoPreviewLayer)
// add action to fullScreenView
gestureFullScreenView = UITapGestureRecognizer(target: self, action: #selector(ViewController.takePhoto(_:)))
self.fullScreenView.addGestureRecognizer(gestureFullScreenView)
// add action to myView
gestureView = UITapGestureRecognizer(target: self, action: #selector(ViewController.setFrontpage(_:)))
self.view.addGestureRecognizer(gestureView)
if (preview == true) {
if let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo) {
// code for photo capture goes here...
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
// process the image data (sampleBuffer) here to get an image file we can put in our view
if (sampleBuffer != nil) {
self.stillImageOutput!.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if let videoConnection = self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo){
videoConnection.videoOrientation = self.interfaceToVideoOrientation()
self.stillImageOutput!.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
(sampleBuffer, error) in
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let image = UIImage(data: imageData)
self.fullScreenView.hidden = true
self.fullScreenView.gestureRecognizers?.forEach(self.fullScreenView.removeGestureRecognizer)
self.session.stopRunning()
// save image to the library
UIImageWriteToSavedPhotosAlbum(image!, self, #selector(ViewController.image(_:didFinishSavingWithError:contextInfo:)), nil)
self.imageViewBackground = UIImageView(frame: self.view.bounds)
self.imageViewBackground.image = image
self.imageViewBackground.tag = self.key
self.view.addSubview(self.imageViewBackground)
})
}
}
})
}
}
else {
preview = true
}
}

App is crashing silently during custom camera (Swift)

The app is crashing at random points in this function. I believe I need to scale the image down, but I am not sure. The only requirements I have for the image are that it remains square and decently sized, because I need it to be big enough to span the entire screen's width.
Here is an error that sometimes comes along with the crash:
warning: could not load any Objective-C class information. This will significantly reduce the quality of type information available.
@IBAction func didPressTakePhoto(sender: UIButton) {
self.previewLayer?.connection.enabled = false
if let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo) {
videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {(sampleBuffer, error) in
if (sampleBuffer != nil) {
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProviderCreateWithCFData(imageData)
let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
var image = UIImage()
if UIDevice.currentDevice().orientation == .Portrait{
image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
}else if UIDevice.currentDevice().orientation == .LandscapeLeft{
image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Up)
}else if UIDevice.currentDevice().orientation == .LandscapeRight{
image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Down)
}
//Crop the image to a square
let imageSize: CGSize = image.size
let width: CGFloat = imageSize.width
let height: CGFloat = imageSize.height
if width != height {
let newDimension: CGFloat = min(width, height)
let widthOffset: CGFloat = (width - newDimension) / 2
let heightOffset: CGFloat = (height - newDimension) / 2
UIGraphicsBeginImageContextWithOptions(CGSizeMake(newDimension, newDimension), false, 0.0)
image.drawAtPoint(CGPointMake(-widthOffset, -heightOffset), blendMode: .Copy, alpha: 1.0)
image = UIGraphicsGetImageFromCurrentImageContext()
let imageData: NSData = UIImageJPEGRepresentation(image, 0.1)!
UIGraphicsEndImageContext()
self.captImage = UIImage(data: imageData)!
}
}
self.performSegueWithIdentifier("fromCustomCamera", sender: self)
})
}
}
This code is running in my viewDidAppear and stillImageOutput is returning nil when I take a photo.
if self.isRunning == false{
captureSession = AVCaptureSession()
captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var error: NSError?
do {
input = try AVCaptureDeviceInput(device: backCamera)
} catch let error1 as NSError {
error = error1
print(error)
input = nil
}
if error == nil && captureSession!.canAddInput(input) {
captureSession!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput!.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession!.canAddOutput(stillImageOutput) {
captureSession!.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.Portrait
previewView.layer.addSublayer(previewLayer!)
captureSession!.startRunning()
self.isRunning = true
}
}
}
Fixed it. The reason it was crashing was that my images were way too big; I had to compress them.
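The answer doesn't show the change, but one common way to shrink a captured UIImage before using it (a sketch of the general technique, not the asker's exact fix) is to downscale it and re-encode it as JPEG:
func compressedImage(image: UIImage, maxDimension: CGFloat, quality: CGFloat) -> UIImage? {
    // scale the longest side down to maxDimension, keeping the aspect ratio
    let scale = min(1.0, maxDimension / max(image.size.width, image.size.height))
    let newSize = CGSizeMake(image.size.width * scale, image.size.height * scale)
    UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
    image.drawInRect(CGRectMake(0, 0, newSize.width, newSize.height))
    let resized = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    // re-encode as JPEG; lower quality values mean smaller data
    guard let jpegData = UIImageJPEGRepresentation(resized, quality) else { return nil }
    return UIImage(data: jpegData)
}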

Simulate AVLayerVideoGravityResizeAspectFill: crop and center video to mimic preview without losing sharpness

Based on this SO post, the code below rotates, centers, and crops a video captured live by the user.
The capture session uses AVCaptureSessionPresetHigh for the preset value, and the preview layer uses AVLayerVideoGravityResizeAspectFill for video gravity. This preview is extremely sharp.
The exported video, however, is not as sharp, ostensibly because scaling from the 1920x1080 resolution for the back camera on the 5S to 320x568 (target size for the exported video) introduces fuzziness from throwing away pixels?
Assuming there is no way to scale from 1920x1080 to 320x568 without some fuzziness, the question becomes: how to mimic the sharpness of the preview layer?
Somehow Apple is using an algorithm to convert a 1920x1080 video into a crisp-looking preview frame of 320x568.
Is there a way to mimic this with either AVAssetWriter or AVAssetExportSession?
func cropVideo() {
// Set start time
let startTime = NSDate().timeIntervalSince1970
// Create main composition & its tracks
let mainComposition = AVMutableComposition()
let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
// Get source video & audio tracks
let videoPath = getFilePath(curSlice!.getCaptureURL())
let videoURL = NSURL(fileURLWithPath: videoPath)
let videoAsset = AVURLAsset(URL: videoURL, options: nil)
let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
let videoSize = sourceVideoTrack.naturalSize
// Get rounded time for video
let roundedDur = floor(curSlice!.getDur() * 100) / 100
let videoDur = CMTimeMakeWithSeconds(roundedDur, 100)
// Add source tracks to composition
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoDur), ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoDur), ofTrack: sourceAudioTrack, atTime: kCMTimeZero)
} catch {
print("Error with insertTimeRange while exporting video: \(error)")
}
// Create video composition
// -- Set video frame
let outputSize = view.bounds.size
let videoComposition = AVMutableVideoComposition()
print("Video composition duration: \(CMTimeGetSeconds(mainComposition.duration))")
// -- Set parent layer
let parentLayer = CALayer()
parentLayer.frame = CGRectMake(0, 0, outputSize.width, outputSize.height)
parentLayer.contentsGravity = kCAGravityResizeAspectFill
// -- Set composition props
videoComposition.renderSize = CGSize(width: outputSize.width, height: outputSize.height)
videoComposition.frameDuration = CMTimeMake(1, Int32(frameRate))
// -- Create video composition instruction
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoDur)
// -- Use layer instruction to match video to output size, mimicking AVLayerVideoGravityResizeAspectFill
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
let videoTransform = getResizeAspectFillTransform(videoSize, outputSize: outputSize)
videoLayerInstruction.setTransform(videoTransform, atTime: kCMTimeZero)
// -- Add layer instruction
instruction.layerInstructions = [videoLayerInstruction]
videoComposition.instructions = [instruction]
// -- Create video layer
let videoLayer = CALayer()
videoLayer.frame = parentLayer.frame
// -- Add sublayers to parent layer
parentLayer.addSublayer(videoLayer)
// -- Set animation tool
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)
// Create exporter
let outputURL = getFilePath(getUniqueFilename(gMP4File))
let exporter = AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetHighestQuality)!
exporter.outputURL = NSURL(fileURLWithPath: outputURL)
exporter.outputFileType = AVFileTypeMPEG4
exporter.videoComposition = videoComposition
exporter.shouldOptimizeForNetworkUse = true
exporter.canPerformMultiplePassesOverSourceMediaData = true
// Export to video
exporter.exportAsynchronouslyWithCompletionHandler({
// Log status
let asset = AVAsset(URL: exporter.outputURL!)
print("Exported slice video. Tracks: \(asset.tracks.count). Duration: \(CMTimeGetSeconds(asset.duration)). Size: \(exporter.estimatedOutputFileLength). Status: \(getExportStatus(exporter)). Output URL: \(exporter.outputURL!). Export time: \( NSDate().timeIntervalSince1970 - startTime).")
// Tell delegate
//delegate.didEndExport(exporter)
self.curSlice!.setOutputURL(exporter.outputURL!.lastPathComponent!)
gUser.save()
})
}
// Returns transform, mimicking AVLayerVideoGravityResizeAspectFill, that converts video of <inputSize> to one of <outputSize>
private func getResizeAspectFillTransform(videoSize: CGSize, outputSize: CGSize) -> CGAffineTransform {
// Compute ratios between video & output sizes
let widthRatio = outputSize.width / videoSize.width
let heightRatio = outputSize.height / videoSize.height
// Set scale to larger of two ratios since goal is to fill output bounds
let scale = widthRatio >= heightRatio ? widthRatio : heightRatio
// Compute video size after scaling
let newWidth = videoSize.width * scale
let newHeight = videoSize.height * scale
// Compute translation required to center image after scaling
// -- Assumes CoreAnimationTool places video frame at (0, 0). Because scale transform is applied first, we must adjust
// each translation point by scale factor.
let translateX = (outputSize.width - newWidth) / 2 / scale
let translateY = (outputSize.height - newHeight) / 2 / scale
// Set transform to resize video while retaining aspect ratio
let resizeTransform = CGAffineTransformMakeScale(scale, scale)
// Apply translation & create final transform
let finalTransform = CGAffineTransformTranslate(resizeTransform, translateX, translateY)
// Return final transform
return finalTransform
}
320x568 video taken with Tim's code:
640x1136 video taken with Tim's code:
Try this. Start a new Single View project in Swift, replace the ViewController with this code and you should be good to go!
I've set up a previewLayer which is a different size from the output; change it at the top of the file.
I added some basic orientation support. It outputs slightly different sizes for landscape vs. portrait. You can specify whatever video dimensions you like in here and it should work fine.
Check out the videoSettings dictionary (line 278ish) for the codec and size of the output file. You can also add other settings in there to deal with key frame intervals etc. to tweak the output size.
I added a recording image to show when it's recording (tap starts, tap ends); you'll need to add an asset called recording into Assets.xcassets (or comment out line 106 where it tries to load it).
That's pretty much it. Good luck!
Oh, it's dumping the video into the app's documents directory; you'll need to go to Window / Devices and download the container to see the video easily. In the TODO there's a section where you could hook in and copy the file to the photo library (makes testing WAY easier).
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
let CAPTURE_SIZE_LANDSCAPE: CGSize = CGSizeMake(1280, 720)
let CAPTURE_SIZE_PORTRAIT: CGSize = CGSizeMake(720, 1280)
var recordingImage : UIImageView = UIImageView()
var previewLayer : AVCaptureVideoPreviewLayer?
var audioQueue : dispatch_queue_t?
var videoQueue : dispatch_queue_t?
let captureSession = AVCaptureSession()
var assetWriter : AVAssetWriter?
var assetWriterInputCamera : AVAssetWriterInput?
var assetWriterInputAudio : AVAssetWriterInput?
var outputConnection: AVCaptureConnection?
var captureDeviceBack : AVCaptureDevice?
var captureDeviceFront : AVCaptureDevice?
var captureDeviceMic : AVCaptureDevice?
var sessionSetupDone: Bool = false
var isRecordingStarted = false
//var recordingStartedTime = kCMTimeZero
var videoOutputURL : NSURL?
var captureSize: CGSize = CGSizeMake(1280, 720)
var previewFrame: CGRect = CGRectMake(0, 0, 180, 360)
var captureDeviceTrigger = true
var captureDevice: AVCaptureDevice? {
get {
return captureDeviceTrigger ? captureDeviceFront : captureDeviceBack
}
}
override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
return UIInterfaceOrientationMask.AllButUpsideDown
}
override func shouldAutorotate() -> Bool {
if isRecordingStarted {
return false
}
if UIDevice.currentDevice().orientation == UIDeviceOrientation.PortraitUpsideDown {
return false
}
if let cameraPreview = self.previewLayer {
if let connection = cameraPreview.connection {
if connection.supportsVideoOrientation {
switch UIDevice.currentDevice().orientation {
case .LandscapeLeft:
connection.videoOrientation = .LandscapeRight
case .LandscapeRight:
connection.videoOrientation = .LandscapeLeft
case .Portrait:
connection.videoOrientation = .Portrait
case .FaceUp:
return false
case .FaceDown:
return false
default:
break
}
}
}
}
return true
}
override func viewDidLoad() {
super.viewDidLoad()
setupViewControls()
//self.recordingStartedTime = kCMTimeZero
// Setup capture session related logic
videoQueue = dispatch_queue_create("video_write_queue", DISPATCH_QUEUE_SERIAL)
audioQueue = dispatch_queue_create("audio_write_queue", DISPATCH_QUEUE_SERIAL)
setupCaptureDevices()
pre_start()
}
//MARK: UI methods
func setupViewControls() {
// TODO: I have an image (red circle) in an Assets.xcassets. Replace the following with your own image
recordingImage.frame = CGRect(x: 0, y: 0, width: 50, height: 50)
recordingImage.image = UIImage(named: "recording")
recordingImage.hidden = true
self.view.addSubview(recordingImage)
// Setup tap to record and stop
let tapGesture = UITapGestureRecognizer(target: self, action: "didGetTapped:")
tapGesture.numberOfTapsRequired = 1
self.view.addGestureRecognizer(tapGesture)
}
func didGetTapped(selector: UITapGestureRecognizer) {
if self.isRecordingStarted {
self.view.gestureRecognizers![0].enabled = false
recordingImage.hidden = true
self.stopRecording()
} else {
recordingImage.hidden = false
self.startRecording()
}
self.isRecordingStarted = !self.isRecordingStarted
}
func switchCamera(selector: UIButton) {
self.captureDeviceTrigger = !self.captureDeviceTrigger
pre_start()
}
//MARK: Video logic
func setupCaptureDevices() {
let devices = AVCaptureDevice.devices()
for device in devices {
if device.hasMediaType(AVMediaTypeVideo) {
if device.position == AVCaptureDevicePosition.Front {
captureDeviceFront = device as? AVCaptureDevice
NSLog("Video Controller: Setup. Front camera is found")
}
if device.position == AVCaptureDevicePosition.Back {
captureDeviceBack = device as? AVCaptureDevice
NSLog("Video Controller: Setup. Back camera is found")
}
}
if device.hasMediaType(AVMediaTypeAudio) {
captureDeviceMic = device as? AVCaptureDevice
NSLog("Video Controller: Setup. Audio device is found")
}
}
}
func alertPermission() {
let permissionAlert = UIAlertController(title: "No Permission", message: "Please allow access to Camera and Microphone", preferredStyle: UIAlertControllerStyle.Alert)
permissionAlert.addAction(UIAlertAction(title: "Go to settings", style: .Default, handler: { (action: UIAlertAction!) in
print("Video Controller: Permission for camera/mic denied. Going to settings")
UIApplication.sharedApplication().openURL(NSURL(string: UIApplicationOpenSettingsURLString)!)
print(UIApplicationOpenSettingsURLString)
}))
presentViewController(permissionAlert, animated: true, completion: nil)
}
func pre_start() {
NSLog("Video Controller: pre_start")
let videoPermission = AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)
let audioPermission = AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeAudio)
if (videoPermission == AVAuthorizationStatus.Denied) || (audioPermission == AVAuthorizationStatus.Denied) {
self.alertPermission()
pre_start()
return
}
if (videoPermission == AVAuthorizationStatus.Authorized) {
self.start()
return
}
AVCaptureDevice.requestAccessForMediaType(AVMediaTypeVideo, completionHandler: { (granted :Bool) -> Void in
self.pre_start()
})
}
func start() {
NSLog("Video Controller: start")
if captureSession.running {
captureSession.beginConfiguration()
if let currentInput = captureSession.inputs[0] as? AVCaptureInput {
captureSession.removeInput(currentInput)
}
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
} catch {
print("Video Controller: begin session. Error adding video input device")
}
captureSession.commitConfiguration()
return
}
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
try captureSession.addInput(AVCaptureDeviceInput(device: captureDeviceMic))
} catch {
print("Video Controller: start. error adding device: \(error)")
}
if let layer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = layer
layer.videoGravity = AVLayerVideoGravityResizeAspect
if let layerConnection = layer.connection {
if UIDevice.currentDevice().orientation == .LandscapeRight {
layerConnection.videoOrientation = AVCaptureVideoOrientation.LandscapeLeft
} else if UIDevice.currentDevice().orientation == .LandscapeLeft {
layerConnection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
} else if UIDevice.currentDevice().orientation == .Portrait {
layerConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
}
}
// TODO: Set the output size of the Preview Layer here
layer.frame = previewFrame
self.view.layer.insertSublayer(layer, atIndex: 0)
}
let bufferVideoQueue = dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL)
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: bufferVideoQueue)
captureSession.addOutput(videoOutput)
if let connection = videoOutput.connectionWithMediaType(AVMediaTypeVideo) {
self.outputConnection = connection
}
let bufferAudioQueue = dispatch_queue_create("audio buffer delegate", DISPATCH_QUEUE_SERIAL)
let audioOutput = AVCaptureAudioDataOutput()
audioOutput.setSampleBufferDelegate(self, queue: bufferAudioQueue)
captureSession.addOutput(audioOutput)
captureSession.startRunning()
}
func getAssetWriter() -> AVAssetWriter? {
NSLog("Video Controller: getAssetWriter")
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
print("Video Controller: getAssetWriter: documentDir Error")
return nil
}
let local_video_name = NSUUID().UUIDString + ".mp4"
self.videoOutputURL = documentDirectory.URLByAppendingPathComponent(local_video_name)
guard let url = self.videoOutputURL else {
return nil
}
self.assetWriter = try? AVAssetWriter(URL: url, fileType: AVFileTypeMPEG4)
guard let writer = self.assetWriter else {
return nil
}
let videoSettings: [String : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : captureSize.width,
AVVideoHeightKey : captureSize.height,
]
assetWriterInputCamera = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
assetWriterInputCamera?.expectsMediaDataInRealTime = true
writer.addInput(assetWriterInputCamera!)
let audioSettings : [String : AnyObject] = [
AVFormatIDKey : NSInteger(kAudioFormatMPEG4AAC),
AVNumberOfChannelsKey : 2,
AVSampleRateKey : NSNumber(double: 44100.0)
]
assetWriterInputAudio = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
assetWriterInputAudio?.expectsMediaDataInRealTime = true
writer.addInput(assetWriterInputAudio!)
return writer
}
func configurePreset() {
NSLog("Video Controller: configurePreset")
if captureSession.canSetSessionPreset(AVCaptureSessionPreset1280x720) {
captureSession.sessionPreset = AVCaptureSessionPreset1280x720
} else {
captureSession.sessionPreset = AVCaptureSessionPreset1920x1080
}
}
func startRecording() {
NSLog("Video Controller: Start recording")
captureSize = UIDeviceOrientationIsLandscape(UIDevice.currentDevice().orientation) ? CAPTURE_SIZE_LANDSCAPE : CAPTURE_SIZE_PORTRAIT
if let connection = self.outputConnection {
if connection.supportsVideoOrientation {
if UIDevice.currentDevice().orientation == .LandscapeRight {
connection.videoOrientation = AVCaptureVideoOrientation.LandscapeLeft
NSLog("orientation: right")
} else if UIDevice.currentDevice().orientation == .LandscapeLeft {
connection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
NSLog("orientation: left")
} else {
connection.videoOrientation = AVCaptureVideoOrientation.Portrait
NSLog("orientation: portrait")
}
}
}
if let writer = getAssetWriter() {
self.assetWriter = writer
let recordingClock = self.captureSession.masterClock
writer.startWriting()
writer.startSessionAtSourceTime(CMClockGetTime(recordingClock))
}
}
func stopRecording() {
NSLog("Video Controller: Stop recording")
if let writer = self.assetWriter {
writer.finishWritingWithCompletionHandler{Void in
print("Recording finished")
// TODO: Handle the video file, copy it from the temp directory etc.
}
}
}
//MARK: Implementation for AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
if !self.isRecordingStarted {
return
}
if let audio = self.assetWriterInputAudio where connection.audioChannels.count > 0 && audio.readyForMoreMediaData {
dispatch_async(audioQueue!) {
audio.appendSampleBuffer(sampleBuffer)
}
return
}
if let camera = self.assetWriterInputCamera where camera.readyForMoreMediaData {
dispatch_async(videoQueue!) {
camera.appendSampleBuffer(sampleBuffer)
}
}
}
}
Additional Edit Info
It seems from our additional conversations in the comments that what you want is to reduce the file size of the output video while keeping the dimensions as high as you can (to retain quality). Remember, the size at which you position a layer on the screen is in points, not pixels. You're writing an output file in pixels; it's not a 1:1 comparison to the iPhone's screen reference units.
To reduce the size of the output file, you have two easy options:
Reduce the resolution - but if you go too small, you'll lose quality when playing it back, especially if it gets scaled up again during playback. Try 640x360 or 720x480 for the output pixels.
Adjust the compression settings. The iPhone has default settings that typically produce a higher quality (larger output file size) video.
Replace the video settings with these options and see how you go:
let videoSettings: [String : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : captureSize.width,
AVVideoHeightKey : captureSize.height,
AVVideoCompressionPropertiesKey : [
AVVideoAverageBitRateKey : 2000000,
AVVideoProfileLevelKey : AVVideoProfileLevelH264Main41,
AVVideoMaxKeyFrameIntervalKey : 90,
]
]
The AVVideoCompressionPropertiesKey dictionary tells AVFoundation how to actually compress the video. The lower the bit rate, the higher the compression (and therefore the better it streams and the less disk space it uses, but the lower the quality). The max key frame interval controls how often an uncompressed (key) frame is written out; setting this higher (in our ~30 frames per second video, 90 is once every 3 seconds) also reduces quality but decreases size. You'll find the constants referenced here: https://developer.apple.com/library/prerelease/ios/documentation/AVFoundation/Reference/AVFoundation_Constants/index.html#//apple_ref/doc/constant_group/Video_Settings
