The label does not appear over the camera preview - iOS

I share my code below. When I turn on the camera and set the label's text, the camera preview works but the label never appears. My program detects the object, and at the end I need to display the result in the label, but unfortunately I cannot get the label to show. What is the problem?
@IBOutlet weak var label: UILabel!
override func viewDidLoad() {
super.viewDidLoad()
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
guard let captureDevice = AVCaptureDevice.default(for: .video) else{ return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "CameraQueue"))
captureSession.addOutput(dataOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
let request = VNCoreMLRequest(model : model) {(finishedReq,error) in
guard let results = finishedReq.results as? [VNClassificationObservation] else{ return }
guard let firstObservation = results.first else { return }
print(firstObservation.identifier , firstObservation.confidence)
DispatchQueue.main.async {
self.label.text = "\(firstObservation.identifier) ,\(firstObservation.confidence)"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}

You need to call self.view.bringSubviewToFront(self.label) (bringSubview(toFront:) in older Swift versions) after you add the preview layer to the view. When you add the preview layer as a sublayer, it is drawn on top of the existing subviews, so the label ends up hidden behind it.
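A minimal sketch of that fix (the class name CameraViewController is hypothetical; the outlet and session setup are assumed to match the question):
import UIKit
import AVFoundation
class CameraViewController: UIViewController {
    @IBOutlet weak var label: UILabel!
    let captureSession = AVCaptureSession()
    override func viewDidLoad() {
        super.viewDidLoad()
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame
        // The preview layer is now drawn above the storyboard subviews,
        // so pull the label back on top of it.
        view.bringSubviewToFront(label) // bringSubview(toFront:) in Swift 4.1 and earlier
    }
}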

Related

Record depth map from iPhone as sequence

I want to create an application on iOS that can record and save RGB+Depth data. I have been able to capture both streams from the dual camera and preview them on screen in real time. Now I want to save them as two sequences in the library (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone gallery as a video or sequence, saving the RGB information at the same time, for further processing?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var previewModeControl: UISegmentedControl!
var previewMode = PreviewMode.original //Original(RGB) or Depth
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
var background: CIImage?
var depthMap: CIImage?
var scale: CGFloat = 0.0
override func viewDidLoad() {
super.viewDidLoad()
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
override var shouldAutorotate: Bool {
return false
}
func configureCaptureSession() {
session.beginConfiguration()
//Add input to the session
guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
fatalError("No depth video camera available")
}
session.sessionPreset = .photo
do{
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.addInput(cameraInput)
}else{
fatalError("Error adding input device to session")
}
}catch{
fatalError(error.localizedDescription)
}
//Add output to the session
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}else{
fatalError("Error adding output to session")
}
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//Add output to the session DEPTH
let depthOutput = AVCaptureDepthDataOutput()
//Set the current view controller as the delegate for the new object
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true // interpolate to fill holes in the depth data
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
}else{
fatalError("Error adding output to session")
}
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
do{
try camera.lockForConfiguration()
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func previewModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
//default:
//previewImage = image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {
[weak self] in self?.previewView.image = displayImage
}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
if previewMode == .original{
return
}
var convertedDepth: AVDepthData
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
}else{
convertedDepth = depthData
}
let pixelBuffer = convertedDepth.depthDataMap
pixelBuffer.clamp()
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
Actual result: the CIImage selected on the UI (original image or depth map) is previewed on screen in real time, but nothing is saved yet.
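One possible direction, sketched below under assumptions: keep the question's capture setup and, in addition, render each AVDepthData map into a pixel buffer and append it to its own AVAssetWriter, in parallel with an analogous writer for the RGB frames. The DepthSequenceWriter type, its names, and the timing handling are hypothetical, not part of the question's code:
import AVFoundation
import CoreImage

// Hypothetical helper: one writer per stream (a second instance would handle the RGB frames).
final class DepthSequenceWriter {
    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor
    private let ciContext = CIContext()

    init(outputURL: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ])
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: input,
            sourcePixelBufferAttributes: [
                kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
                kCVPixelBufferWidthKey as String: width,
                kCVPixelBufferHeightKey as String: height
            ])
        writer.add(input)
    }

    func start(at time: CMTime) {
        writer.startWriting()
        writer.startSession(atSourceTime: time)
    }

    // Call from depthDataOutput(_:didOutput:timestamp:connection:).
    func append(_ depthData: AVDepthData, at time: CMTime) {
        guard input.isReadyForMoreMediaData, let pool = adaptor.pixelBufferPool else { return }
        let disparity = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
        // Disparity values may need clamping/normalization to 0...1 before encoding,
        // as the question's clamp() call suggests.
        let depthImage = CIImage(cvPixelBuffer: disparity.depthDataMap)
        var buffer: CVPixelBuffer?
        CVPixelBufferPoolCreatePixelBuffer(nil, pool, &buffer)
        guard let pixelBuffer = buffer else { return }
        // Render the depth image into a BGRA buffer the H.264 encoder can accept.
        ciContext.render(depthImage, to: pixelBuffer)
        adaptor.append(pixelBuffer, withPresentationTime: time)
    }

    func finish(completion: @escaping () -> Void) {
        input.markAsFinished()
        writer.finishWriting(completionHandler: completion)
    }
}
The finished files could then be added to the photo library with PHPhotoLibrary, as in the recording question further down.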

The Vision API in Xcode 10/Swift doesn't detect anything. Am I doing something obviously wrong?

I have been tinkering with this for a long time, and as I'm new to Swift I'm struggling. I am trying to detect a rectangle in the camera's live feed (the eventual goal is to detect when a crossword puzzle is seen), but the included code picks up nothing. I'm looking for the "Rectangle Detected" string in the console, but it looks like that code is never reached. Can anyone see why? Many thanks in advance. Here is my ViewController code:
@IBOutlet var cameraView: UIView!
var rootLayer: CALayer! = nil
private lazy var captureSession: AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSession.Preset.photo
guard let backCamera = AVCaptureDevice.default(for: .video),
let input = try? AVCaptureDeviceInput(device: backCamera) else {
return session
}
session.addInput(input)
return session
}()
private lazy var cameraLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let handler = VNSequenceRequestHandler()
fileprivate var lastObservation: VNDetectedObjectObservation?
lazy var highlightView: UIView = {
let view = UIView()
view.layer.borderColor = UIColor.red.cgColor
view.layer.borderWidth = 4
view.backgroundColor = .clear
return view
}()
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
/*cameraView.layer.addSublayer(cameraLayer)
cameraView.addSubview(highlightView)*/
rootLayer = cameraView.layer
cameraLayer.frame = rootLayer.bounds
rootLayer.insertSublayer(cameraLayer, at: 0)
cameraView.addSubview(highlightView)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "queue"))
captureSession.addOutput(output)
captureSession.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
let observation = lastObservation else {
return
}
let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangle)
do {
try handler.perform([request], on: pixelBuffer)
}
catch {
print(error)
}
}
fileprivate func handleRectangle(request: VNRequest, error: Error?) {
DispatchQueue.main.async {
guard let newObservation = request.results?.first as? VNDetectedObjectObservation else {
return
}
self.lastObservation = newObservation
print("Rectangle Detected")
var transformedRect = newObservation.boundingBox
transformedRect.origin.y = 1 - transformedRect.origin.y
let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: transformedRect)
self.highlightView.frame = convertedRect
}
}
}
fileprivate var lastObservation: VNDetectedObjectObservation?
This property is Optional, so it starts as nil.
Every call of captureOutput(_:didOutput:from:) checks whether it is nil and exits early, so it never gets set. Remove let observation = lastObservation from your guard statement and this will fix your initial issue. You may then need to change the logic slightly.
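A minimal sketch of the corrected delegate method, assuming the rest of the question's setup stays the same:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // No longer bail out when lastObservation is still nil.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangle)
    do {
        try handler.perform([request], on: pixelBuffer)
    } catch {
        print(error)
    }
}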

Variables show up as nil - Swift 4 iOS

For some reason my variables stringy and stringie print to the console just fine, but when I try to set them on a label, they show up as nil.
My goal is to display the string and the float in the view controller, but this is just not working.
I think it has something to do with viewDidLoad, as if it were hiding the global variables. However, if I try to set my label outside viewDidLoad, I get a declaration error.
// ViewController.swift
// Intellicam
//
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
var stringy:String!
var stringie:Float!
override func viewDidLoad() {
super.viewDidLoad()
//here we start the camera
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureSession.addOutput(dataOutput)
// let request = VNCoreMLModel(model: VNCoreMLModel, completionHandler: VNRequestCompletionHandler)
// VNImageRequestHandler(cgImage: <#T##CGImage#>, options: <#T##[VNImageOption : Any]#>)
self.Labele.text = "Guess: \(stringy) + Certainty: \(stringie)"
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
//print("Camera was able to capture a frame:", Date())
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
guard let model = try? VNCoreMLModel(for: Resnet50().model) else {return}
let request = VNCoreMLRequest(model: model){
(finishedReq, err) in
//print(finishedReq.results)
guard let results = finishedReq.results as? [VNClassificationObservation] else {return}
guard let firstObservastion = results.first else {return}
//print("Guess: \(firstObservastion.identifier) Certainty: \(firstObservastion.confidence)%")
self.stringy = firstObservastion.identifier
self.stringie = firstObservastion.confidence
print(self.stringy)
print(self.stringie)
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
@IBOutlet weak var Labele: UILabel!
}
First, never force-unwrap until you are sure about the value. In your case the VNCoreMLRequest can fail, both variables will stay unassigned, and the app will definitely crash.
Also, make sure you use a proper naming convention for your label.
Your issue is that you are not setting the label's value from the result you are getting.
To fix this:
var stringy:String? {
didSet {
DispatchQueue.main.async {
self.Labele.text = self.stringy
}
}
}
OR
self.stringy = firstObservastion.identifier
self.stringie = firstObservastion.confidence
DispatchQueue.main.async {
self.Labele.text = "Guess: \(self.stringy) + Certainty: \(self.stringie)"
}
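Putting the second option in context, a minimal sketch of the completion handler (property and outlet names are taken from the question):
let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
    guard let results = finishedReq.results as? [VNClassificationObservation],
          let firstObservation = results.first else { return }
    self.stringy = firstObservation.identifier
    self.stringie = firstObservation.confidence
    // UI updates must happen on the main queue.
    DispatchQueue.main.async {
        self.Labele.text = "Guess: \(firstObservation.identifier) + Certainty: \(firstObservation.confidence)"
    }
}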

Recording videos with real-time filters in Swift

I am new to Swift and am trying to build a camera app that can apply real-time filters and save the video with the filters applied.
So far I can preview in real time with the filters applied, but when I save the video it's all black.
import UIKit
import AVFoundation
import AssetsLibrary
import CoreMedia
import Photos
class ViewController: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession: AVCaptureSession!
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var recordButtton: UIButton!
@IBOutlet weak var imageView: UIImageView!
var assetWriter: AVAssetWriter?
var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
var isWriting = false
var currentSampleTime: CMTime?
var currentVideoDimensions: CMVideoDimensions?
override func viewDidLoad() {
super.viewDidLoad()
FilterVendor.register()
setupCaptureSession()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func setupCaptureSession() {
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
guard let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo), let input = try? AVCaptureDeviceInput(device: captureDevice) else {
print("Can't access the camera")
return
}
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if((previewLayer) != nil) {
view.layer.addSublayer(previewLayer!)
}
captureSession.startRunning()
}
@IBAction func record(_ sender: Any) {
if isWriting {
print("stop record")
self.isWriting = false
assetWriterPixelBufferInput = nil
assetWriter?.finishWriting(completionHandler: {[unowned self] () -> Void in
self.saveMovieToCameraRoll()
})
} else {
print("start record")
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
isWriting = true
}
}
func saveMovieToCameraRoll() {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: self.movieURL() as URL)
}) { saved, error in
if saved {
print("saved")
}
}
}
func movieURL() -> NSURL {
let tempDir = NSTemporaryDirectory()
let url = NSURL(fileURLWithPath: tempDir).appendingPathComponent("tmpMov.mov")
return url! as NSURL
}
func checkForAndDeleteFile() {
let fm = FileManager.default
let url = movieURL()
let exist = fm.fileExists(atPath: url.path!)
if exist {
do {
try fm.removeItem(at: url as URL)
} catch let error as NSError {
print(error.localizedDescription)
}
}
}
func createWriter() {
self.checkForAndDeleteFile()
do {
assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileTypeQuickTimeMovie)
} catch let error as NSError {
print(error.localizedDescription)
return
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : Int(currentVideoDimensions!.width),
AVVideoHeightKey : Int(currentVideoDimensions!.height)
] as [String : Any]
let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings as? [String : AnyObject])
assetWriterVideoInput.expectsMediaDataInRealTime = true
assetWriterVideoInput.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI / 2.0))
let sourcePixelBufferAttributesDictionary = [
String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA),
String(kCVPixelBufferWidthKey) : Int(currentVideoDimensions!.width),
String(kCVPixelBufferHeightKey) : Int(currentVideoDimensions!.height),
String(kCVPixelFormatOpenGLESCompatibility) : kCFBooleanTrue
] as [String : Any]
assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if assetWriter!.canAdd(assetWriterVideoInput) {
assetWriter!.add(assetWriterVideoInput)
} else {
print("no way\(assetWriterVideoInput)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
var newPixelBuffer: CVPixelBuffer? = nil
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
}
I've added some comments to the critical part below:
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
// COMMENT: This line makes sense - this is your pixelbuffer from the camera.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// COMMENT: OK, so you turn pixelBuffer into a CIImage...
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
// COMMENT: And now you've create a CIImage with a Filter instruction...
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
// COMMENT: Here's where it gets weird. You've declared a new, empty pixelBuffer... but you already have one (pixelBuffer) that contains the image you want to write...
var newPixelBuffer: CVPixelBuffer? = nil
// COMMENT: And you grabbed memory from the pool.
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
// COMMENT: And now you wrote an empty pixelBuffer back <-- this is what's causing the black frame.
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
// COMMENT: And now you're sending the filtered image back to the screen.
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
It looks to me like you're basically getting the screen image, creating a filtered copy, then making a NEW pixel buffer which is empty and writing that out.
If you write the pixelBuffer you grabbed instead of the new one you're creating, you should successfully write the image.
What you need to successfully write out the filtered video is to create a new CVPixelBuffer from a CIImage - that solution exists here on StackOverflow already, I know because I needed that step myself!
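A minimal sketch of that missing step, assuming a ciContext property (e.g. let ciContext = CIContext()) that is not in the question's code; only the writing branch of captureOutput is shown:
if self.isWriting,
   self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true,
   let pool = self.assetWriterPixelBufferInput?.pixelBufferPool,
   let filteredImage = filter.value(forKey: kCIOutputImageKey) as? CIImage {
    var newPixelBuffer: CVPixelBuffer? = nil
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &newPixelBuffer)
    if let outputBuffer = newPixelBuffer {
        // Render the filtered CIImage into the buffer before appending it,
        // instead of appending the buffer while it is still empty.
        self.ciContext.render(filteredImage, to: outputBuffer)
        self.assetWriterPixelBufferInput?.append(outputBuffer, withPresentationTime: self.currentSampleTime!)
    }
}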

Swift iOS, wondering if someone could explain why the output is rotated 90 degrees?

I am following this link here, How to apply filter to Video real-time using Swift, and for whatever reason the UIImageOrientation is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to Up, but it still appears the same. Does anyone have any idea why this is? I'm not sure if it is the image, the preview layer, or the image view that is causing this.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia
let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter,sepiaFilter,vignetteEffect]
class TestViewController:UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var Photo: UIButton!
var sessionQueue: dispatch_queue_t!
var stillImageOutput: AVCaptureStillImageOutput?
var videoDeviceInput: AVCaptureDeviceInput?
var buttonTapped = false
override func viewDidLoad() {
super.viewDidLoad()
sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue",DISPATCH_QUEUE_SERIAL)
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var videoDeviceInput: AVCaptureDeviceInput?
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
captureSession.addInput(input)
self.videoDeviceInput = videoDeviceInput
}
catch
{
print("can't access camera")
return
}
// although we don't use this, it's required to get captureOutput invoked
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
imageView.contentMode = UIViewContentMode.ScaleAspectFill
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
if captureSession.canAddOutput(stillImageOutput) {
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
captureSession.addOutput(stillImageOutput)
self.stillImageOutput = stillImageOutput
}
}
override func viewWillLayoutSubviews() {
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
let filter = Filters[1]
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!)
var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!, scale:1.0, orientation: .Up)
dispatch_async(dispatch_get_main_queue())
{
self.imageView.image = newImage
}
}
@IBAction func snapStill(sender: UIButton) {
print("snapStillImage")
let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
previewController.media = Media.Photo(image: imageView.image!)
previewController.isFrontCamera = false
self.presentViewController(previewController, animated: true, completion: nil)
}
override func viewDidLayoutSubviews()
{
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
The reason for this is that the video orientation is not set on the connection.
You need to set the orientation in captureOutput():
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
connection.videoOrientation = .portrait
...
This should solve your problem.
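Alternatively (a sketch, not from the original answer), in current Swift the orientation can be configured once when the output is added to the session, instead of on every frame:
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer delegate"))
if captureSession.canAddOutput(videoOutput) {
    captureSession.addOutput(videoOutput)
    // Configure the connection once, after the output has been added.
    videoOutput.connection(with: .video)?.videoOrientation = .portrait
}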

Resources