So, I've figured out how to make a zoom effect with CATransform3DMakeScale(2.4, 2.4, 2.4), but now I'm having trouble saving the zoomed preview as an image (this is the code I use for zooming):
// init camera device
let captureDevice: AVCaptureDevice? = initCaptureDevice()
print("camera was initialized")

// Prepare output
initOutput()

if (captureDevice != nil) {
    let deviceInput: AVCaptureInput? = initInputDevice(captureDevice: captureDevice!)
    if (deviceInput != nil) {
        initSession(deviceInput: deviceInput!)

        // Preview size
        let previewLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session)
        previewLayer.frame = self.view.bounds
        previewLayer.transform = CATransform3DMakeScale(2.4, 2.4, 2.4)
        imagePreviewScale = previewLayer.contentsScale
        self.view.layer.addSublayer(previewLayer)

        self.session?.startRunning()
Now I tried to save the previewed, zoomed image like so:
let videoConnection: AVCaptureConnection? = self.imageOutput?.connection(withMediaType: AVMediaTypeVideo)
if (videoConnection != nil) {
    videoConnection?.videoScaleAndCropFactor = imagePreviewScale
    self.imageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (imageDataSampleBuffer, error) -> Void in
        if (imageDataSampleBuffer != nil) {
            // Capture JPEG
            let imageData: NSData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer) as NSData
            // JPEG
            let image = UIImage(data: imageData as Data)
and added the line
imagePreviewScale = previewLayer.contentsScale
But still nothing happens. Can anyone tell me how to save exactly the zoomed picture?
The problem is that previewLayer.contentsScale is 1, so you're setting videoScaleAndCropFactor to 1.
You need to set it to
videoConnection?.videoScaleAndCropFactor = 2.4
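In other words, whatever zoom factor you apply to the preview layer's transform is the factor the connection needs as well. Here is a minimal sketch of that idea (assumed variable names such as zoomFactor, using the same Swift 3-era AVCaptureStillImageOutput API as the question; not a drop-in implementation):

let zoomFactor: CGFloat = 2.4

// Zoom the live preview.
previewLayer.transform = CATransform3DMakeScale(zoomFactor, zoomFactor, 1.0)

// Apply the same factor to the still image connection so the saved photo matches the preview.
// (The value must not exceed videoConnection.videoMaxScaleAndCropFactor.)
if let videoConnection = self.imageOutput?.connection(withMediaType: AVMediaTypeVideo) {
    videoConnection.videoScaleAndCropFactor = zoomFactor
}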
I want to make a custom cameraView overlay. I want to use the overlay, which is an image, as a template, but the rect of clear space changes depending on the phone.
Template:
I tried to create a view as a container behind the image, but the captured image includes the part that I don't want.
self.session = AVCaptureSession()
self.session!.sessionPreset = AVCaptureSession.Preset.photo

let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
var error: NSError?
var input: AVCaptureDeviceInput!
do {
    input = try AVCaptureDeviceInput(device: backCamera!)
} catch let error1 as NSError {
    error = error1
    input = nil
    print(error!.localizedDescription)
}

if error == nil && session!.canAddInput(input) {
    session!.addInput(input)

    stillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

    if session!.canAddOutput(stillImageOutput!) {
        session!.addOutput(stillImageOutput!)

        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
        videoPreviewLayer!.videoGravity = AVLayerVideoGravity.resizeAspect
        videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        previewView.layer.addSublayer(videoPreviewLayer!)

        session!.startRunning()
    }
}
// in viewDidLoad
videoPreviewLayer!.frame = previewView.bounds // previewView is the UIView behind the image
The expected result is that the white space would be the custom camera view. Also, it seems that AVCaptureStillImageOutput was deprecated in iOS 10.0.
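One possible way to crop the captured photo to the overlay's clear area (a rough sketch, not from the original post; clearRect, the cropToOverlay name, and the orientation handling are my assumptions, using the same modern Swift naming as the snippet above):

// Sketch only: assumes `clearRect` is the transparent region's frame in previewView's
// coordinate space and that the photo's orientation matches the preview.
func cropToOverlay(_ image: UIImage, clearRect: CGRect) -> UIImage? {
    guard let previewLayer = videoPreviewLayer, let cgImage = image.cgImage else { return nil }

    // Convert the on-screen rect to the capture output's normalized (0...1) coordinates.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: clearRect)

    // Scale the normalized rect up to pixel coordinates and crop.
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.size.width * width,
                          height: outputRect.size.height * height)

    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}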
I'm using AVCaptureSession to create a camera and I'm trying to take a picture with it. Here is the code that loads the camera...
func reloadCamera() {
    cameraView.backgroundColor = UIColor.clearColor()
    captureSession = AVCaptureSession()
    captureSession!.sessionPreset = AVCaptureSessionPresetHigh

    let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

    if (camera == false) {
        let videoDevices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for device in videoDevices {
            let device = device as! AVCaptureDevice
            if device.position == AVCaptureDevicePosition.Front {
                captureDevice = device
                break
            } else {
                captureDevice = backCamera
            }
        }
    } else {
        captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    }

    do {
        let input = try AVCaptureDeviceInput(device: captureDevice)
        if captureSession!.canAddInput(input) {
            captureSession!.addInput(input)

            stillImageOutput = AVCaptureStillImageOutput()
            stillImageOutput!.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

            if captureSession!.canAddOutput(stillImageOutput) {
                captureSession!.addOutput(stillImageOutput)

                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
                previewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.Portrait
                previewLayer?.frame = cameraView.bounds
                cameraView.layer.addSublayer(previewLayer!)

                captureSession!.startRunning()
            }
        }
    } catch let error as NSError {
        // Handle any errors
        print(error)
    }
}
Here is how I take a photo...
func didPressTakePhoto() {
    toggleFlash()
    if let videoConnection = stillImageOutput?.connectionWithMediaType(AVMediaTypeVideo) {
        videoConnection.videoOrientation = (previewLayer?.connection.videoOrientation)!
        stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
            (sampleBuffer, error) in
            if sampleBuffer != nil {
                let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
                self.capturedImage = UIImage(data: imageData)

                if self.camera == true {
                    self.capturedImage = UIImage(CGImage: self.capturedImage.CGImage!, scale: 1.0, orientation: UIImageOrientation.Right)
                } else {
                    self.capturedImage = UIImage(CGImage: self.capturedImage.CGImage!, scale: 1.0, orientation: UIImageOrientation.LeftMirrored)
                }

                self.tempImageView.image = self.capturedImage
                UIImageWriteToSavedPhotosAlbum(self.capturedImage, nil, nil, nil)

                self.tempImageView.hidden = false
                self.goButton.hidden = false
                self.cameraView.hidden = true
                self.removeImageButton.hidden = false
                self.captureButton.hidden = true
                self.flashChanger.hidden = true
                self.switchCameraButton.hidden = true
            }
        })
    }
}
But what's happening is that the picture that is taken is as large as the entire screen (like Snapchat), whereas I only want it to be as big as the UIView I'm taking it from. Tell me if you need any more information. Thank you!
First of all, you are the one who is setting the capture session's preset to AVCaptureSessionPresetHigh. If you don't need that, don't do that; use a smaller-size preset. For example, use AVCaptureSessionPreset640x480 to get a smaller size.
Second, no matter what the size of the resulting photo may be, reducing it to the size you need, and displaying it at that size, is entirely up to you. Ultimately, it's just a matter of the size and content mode of your image view, though it is always better to reduce the image size as well, in order to avoid wasting memory (as you would by displaying an image that is significantly larger than what the user is shown).
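For example, a minimal resize helper (my sketch, not the answerer's code; modern Swift naming, so adapt it to the Swift 2-style code in the question):

// Scale the captured photo down to the view it will be shown in, so you don't keep
// a full-resolution image around just to fill a small UIView.
func resizedImage(_ image: UIImage, to size: CGSize) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(size, false, 0.0) // 0.0 = use the screen scale
    defer { UIGraphicsEndImageContext() }
    image.draw(in: CGRect(origin: .zero, size: size))
    return UIGraphicsGetImageFromCurrentImageContext()
}

// Hypothetical usage with the question's properties:
// self.tempImageView.image = resizedImage(self.capturedImage, to: self.cameraView.bounds.size)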
I am having a hard time with something I think shouldn't be so difficult, so I presume I must be looking at the problem from the wrong angle.
In order to understand how AVCaptureStillImageOutput and the camera work I made a tiny app.
This app is able to take a picture and save it as a PNG file (I do not want JPEG). The next time the app is launched, it checks if a file is present and if it is, the image stored inside the file is used as the background view of the app. The idea is rather simple.
The problem is that it does not work. If someone can tell me what I am doing wrong that will be very helpful.
I would like the picture to appear as the background the same way it was on the display when it was taken, but it is rotated or has the wrong scale, etc.
Here is the relevant code (I can provide more information if ever needed).
The viewDidLoad method:
override func viewDidLoad() {
    super.viewDidLoad()

    // For the photo capture:
    captureSession.sessionPreset = AVCaptureSessionPresetHigh

    // Select the appropriate capture device:
    for device in AVCaptureDevice.devices() {
        // Make sure this particular device supports video.
        if (device.hasMediaType(AVMediaTypeVideo)) {
            // Finally check the position and confirm we've got the back camera.
            if (device.position == AVCaptureDevicePosition.Back) {
                captureDevice = device as? AVCaptureDevice
            }
        }
    }

    tapGesture = UITapGestureRecognizer(target: self, action: Selector("takePhoto"))
    self.view.addGestureRecognizer(tapGesture)

    let filePath = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("BackGroundImage#2x.png")
    if !NSFileManager.defaultManager().fileExistsAtPath(filePath) { return }

    let bgImage = UIImage(contentsOfFile: filePath),
        bgView = UIImageView(image: bgImage)
    self.view.addSubview(bgView)
}
The method to handle the picture taking:
func takePhoto() {
    if !captureSession.running {
        beginPhotoCaptureSession()
        return
    }

    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageDataSampleBuffer, error) -> Void in
            if error == nil {
                var localImage = UIImage(fromSampleBuffer: imageDataSampleBuffer)
                UIGraphicsBeginImageContext(localImage!.size)
                CGContextRotateCTM(UIGraphicsGetCurrentContext(), CGFloat(M_PI_2))
                //localImage!.drawAtPoint(CGPointZero)
                localImage!.drawAtPoint(CGPoint(x: -localImage!.size.height, y: -localImage!.size.width))
                //localImage!.drawAtPoint(CGPoint(x: -localImage!.size.width, y: -localImage!.size.height))
                localImage = UIGraphicsGetImageFromCurrentImageContext()
                UIGraphicsEndImageContext()
                localImage = resizeImage(localImage!, toSize: self.view.frame.size)

                if let data = UIImagePNGRepresentation(localImage!) {
                    let bitMapName = "BackGroundImage#2x"
                    let filename = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("\(bitMapName).png")
                    data.writeToFile(filename, atomically: true)
                    print("Picture saved: \(bitMapName)\n\(filename)")
                }
            } else {
                print("Error on taking a picture:\n\(error)")
            }
        }
    }

    captureSession.stopRunning()
    previewLayer!.removeFromSuperlayer()
}
The method to start the AVCaptureSession:
func beginPhotoCaptureSession() {
    do {
        let input = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(input)
    } catch let error as NSError {
        // Handle any errors:
        print(error)
    }

    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.frame = self.view.layer.frame
    self.view.layer.addSublayer(previewLayer!)
    captureSession.startRunning()

    stillImageOutput.outputSettings = [kCVPixelBufferPixelFormatTypeKey: Int(kCVPixelFormatType_32BGRA)]
    if captureSession.canAddOutput(stillImageOutput) {
        captureSession.addOutput(stillImageOutput)
    }
}
As an example here is an image of a picture taken with the app:
Now here is what I get as the background of the app when it is relaunched:
If it was working correctly the 2 pictures would be similar.
I can't see the rotation in the screenshot, but the scale problem is related to how you draw the image in takePhoto. You can try:
func takePhoto() {
    if !captureSession.running {
        beginPhotoCaptureSession()
        return
    }

    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageDataSampleBuffer, error) -> Void in
            if error == nil {
                if let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer) {
                    let bitMapName = "BackGroundImage#2x"
                    let filename = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("\(bitMapName).png")
                    data.writeToFile(filename, atomically: true)
                    print("Picture saved: \(bitMapName)\n\(filename)")
                }
            } else {
                print("Error on taking a picture:\n\(error)")
            }
        }
    }

    captureSession.stopRunning()
    previewLayer!.removeFromSuperlayer()
}
For those who might hit the same issue at some point, I post below what I fixed in the code to make it work. There may still be some improvement needed to support all possible orientations, but it's OK for a start.
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
    stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
        (imageDataSampleBuffer, error) -> Void in
        if error == nil {
            var localImage = UIImage(fromSampleBuffer: imageDataSampleBuffer)
            var imageSize = CGSize(width: UIScreen.mainScreen().bounds.height * UIScreen.mainScreen().scale,
                                   height: UIScreen.mainScreen().bounds.width * UIScreen.mainScreen().scale)
            localImage = resizeImage(localImage!, toSize: imageSize)
            imageSize = CGSize(width: imageSize.height, height: imageSize.width)

            UIGraphicsBeginImageContext(imageSize)
            CGContextRotateCTM(UIGraphicsGetCurrentContext(), CGFloat(M_PI_2))
            localImage!.drawAtPoint(CGPoint(x: 0.0, y: -imageSize.width))
            localImage = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()

            if let data = UIImagePNGRepresentation(localImage!) {
                data.writeToFile(self.bmpFilePath, atomically: true)
            }
        } else {
            print("Error on taking a picture:\n\(error)")
        }
    }
}
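As a side note, an alternative sketch for the orientation part (not from the original answer; it assumes the UIImage created from the sample buffer carries the correct imageOrientation, which may not hold for every custom UIImage(fromSampleBuffer:) helper): redraw the image once, since drawInRect applies the orientation, so the redrawn copy comes out upright.

// Sketch only (Swift 2-era naming to match the code above).
func normalizedImage(image: UIImage) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(image.size, true, image.scale)
    image.drawInRect(CGRect(origin: CGPointZero, size: image.size)) // honours imageOrientation
    let result = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return result
}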
My goal is to use an AVCaptureSession to programmatically lock focus, capture one image, activate the flash, then capture a second image after some delay.
I have managed to get the captures to work using an AVCaptureSession instance and an AVCaptureStillImageOutput. However, the images I get when calling captureStillImageAsynchronouslyFromConnection(_:completionHandler:) are 1920 x 1080, not the full 12 megapixel image my iPhone 6S camera is capable of.
Here is my capture function:
func captureImageFromStream(completion: (result: UIImage) -> Void) {
    if let stillOutput = self.stillImageOutput {
        var videoConnection: AVCaptureConnection?
        for connection in stillOutput.connections {
            for port in connection.inputPorts! {
                if port.mediaType == AVMediaTypeVideo {
                    videoConnection = connection as? AVCaptureConnection
                    break
                }
            }
            if videoConnection != nil {
                break
            }
        }

        if videoConnection != nil {
            stillOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
                (imageDataSampleBuffer, error) -> Void in
                if error == nil {
                    let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer)
                    if let image = UIImage(data: imageData) {
                        completion(result: image)
                    }
                } else {
                    NSLog("ImageCapture Error: \(error)")
                }
            }
        }
    }
}
What modifications should I make to capture the image I'm looking for? I'm new to Swift, so please excuse any beginner mistakes I've made.
Before you addOutput the stillImageOutput and startRunning, you need to set your capture session preset to photo:
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
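For example, in a setup routine like the one in the question, the preset change goes before the output is added and the session is started (a sketch with assumed property names, using the same Swift 2-era API as the question):

captureSession.sessionPreset = AVCaptureSessionPresetPhoto // full-resolution stills

stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
    captureSession.addOutput(stillImageOutput)
}
captureSession.startRunning()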
I'm trying to get the camera view of a camera app to show. I've been messing with the camera-view setup code without success, and I think it has something to do with my GLKView setup code. The UIView in the storyboard shows up in the app, but it's blank (white). I don't get any errors.
I'm building this for at least iOS 7.0.
I added both the createGLKView and the setupCameraView blocks just in case!
EDIT: I forgot to set an outlet to the UIView. It shows now, but the camera view is black. My background is white so I know that the camera view is showing up black.
func createGLKView() {
    if (self.context != nil) {
        return
    }

    self.context = EAGLContext(API: EAGLRenderingAPI.OpenGLES2)
    //.init(API:EAGLRenderingAPI.OpenGLES2)

    var view: GLKView = GLKView(frame: self.bounds)
    //.init(frame: bounds.self)
    view.autoresizingMask = UIViewAutoresizing(2) | UIViewAutoresizing(5)
    view.translatesAutoresizingMaskIntoConstraints()
    view.context = self.context
    view.contentScaleFactor = 1.0
    view.drawableDepthFormat = GLKViewDrawableDepthFormat.Format24
    self.insertSubview(view, atIndex: 0)
    glkView = view

    glGenRenderbuffers(1, &renderBuffer)
    glBindRenderbuffer(GLenum(GL_RENDERBUFFER), renderBuffer)

    coreImageContext = CIContext(EAGLContext: self.context)
    EAGLContext.setCurrentContext(self.context)
    println("GLKView setup")
}
func setupCameraView() {
    self.createGLKView()

    var possibleDevices: NSArray = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
    //var device: AVCaptureDevice = possibleDevices.firstObject as! AVCaptureDevice
    var device: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    /*
    if device != true {
        println("device returned!")
        return
    }
    */

    let devices = AVCaptureDevice.devices()
    // Loop through all the capture devices on this phone
    for camera in devices {
        // Make sure this particular device supports video
        if (camera.hasMediaType(AVMediaTypeVideo)) {
            // Finally check the position and confirm we've got the back camera
            if (camera.position == AVCaptureDevicePosition.Back) {
                device = camera as! AVCaptureDevice
            }
        }
    }

    imageDetectionConfidence = 0.0

    var session: AVCaptureSession = AVCaptureSession()
    self.captureSession = session
    session.beginConfiguration()
    self.captureDevice = device

    /*
    for camera in possibleDevices {
        if camera.position == AVCaptureDevicePosition.Back {
            device = camera as! AVCaptureDevice
            println("back camera selected!")
            break
        }
    }
    */

    var error: NSError?
    /*
    var input: AVCaptureDeviceInput = AVCaptureDeviceInput.deviceInputWithDevice(device, error: &error) as! AVCaptureDeviceInput
    session.sessionPreset = AVCaptureSessionPresetPhoto
    session.addInput(input)
    */
    if let input: AVCaptureDeviceInput = AVCaptureDeviceInput.deviceInputWithDevice(device, error: &error) as? AVCaptureDeviceInput {
        session.sessionPreset = AVCaptureSessionPresetPhoto
        session.addInput(input)
    }

    var dataOutput: AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
    dataOutput.alwaysDiscardsLateVideoFrames = true
    //dataOutput.videoSettings = NSDictionary(object: Int(kCVPixelFormatType_32BGRA), forKey: kCVPixelBufferPixelFormatTypeKey as String) as [NSObject: AnyObject]
    dataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
    dataOutput.setSampleBufferDelegate(self, queue: dispatch_get_main_queue())
    session.addOutput(dataOutput)

    self.stillImageOutput = AVCaptureStillImageOutput()
    session.addOutput(self.stillImageOutput)

    var connection: AVCaptureConnection = (dataOutput.connections as NSArray).firstObject as! AVCaptureConnection
    connection.videoOrientation = AVCaptureVideoOrientation.Portrait

    if device.flashAvailable {
        device.lockForConfiguration(nil)
        device.flashMode = AVCaptureFlashMode.Off
        device.unlockForConfiguration()

        if device.isFocusModeSupported(AVCaptureFocusMode.ContinuousAutoFocus) {
            device.lockForConfiguration(nil)
            device.focusMode = AVCaptureFocusMode.ContinuousAutoFocus
            device.unlockForConfiguration()
        }
    }

    session.commitConfiguration()
}
Assuming that the code is correct, you could try to bring your subview to the front. I've found that sometimes when adding a new subview it isn't always shown above the others and gets hidden behind them.
self.bringSubviewToFront(view)
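For context, that call would go at the end of createGLKView, after the GLKView has been added (a sketch using the question's variable names; whether this is the actual cause depends on the rest of the view hierarchy):

self.insertSubview(view, atIndex: 0)
glkView = view
self.bringSubviewToFront(view) // make sure the GLKView isn't hidden behind sibling views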
EDIT: It might help for others to know what iOS version you are programming for.