How do you apply Core Image filters to an onscreen image using Swift on macOS or iOS?

The Photos app's editing adjustments provide a realtime view of the adjustments as they are applied. I wasn't able to find any samples of how to do this. All the examples seem to apply the filters through a pipeline of sorts and then take the resulting image and update the screen with the result. See the code below.
Photos seems to show the adjustment applied directly to the onscreen image. How do they achieve this?
func editImage(inputImage: CGImage) {
    DispatchQueue.global().async {
        let beginImage = CIImage(cgImage: inputImage)
        guard let exposureOutput = self.exposureFilter(beginImage, ev: self.brightness) else {
            return
        }
        guard let vibranceOutput = self.vibranceFilter(exposureOutput, amount: self.vibranceAmount) else {
            return
        }
        guard let unsharpMaskOutput = self.unsharpMaskFilter(vibranceOutput, intensity: self.unsharpMaskIntensity, radius: self.unsharpMaskRadius) else {
            return
        }
        guard let sharpnessOutput = self.sharpenFilter(unsharpMaskOutput, sharpness: self.unsharpMaskIntensity) else {
            return
        }
        if let cgimg = self.context.createCGImage(sharpnessOutput, from: vibranceOutput.extent) {
            DispatchQueue.main.async {
                self.cgImage = cgimg
            }
        }
    }
}

OK, I just found the answer: use MTKView. It is working fine, except for getting the image to fill the view correctly.
For the benefit of others, here are the basics. I have yet to figure out how to position the image correctly in the view, but I can see the filter applied in real time!
class ViewController: NSViewController, MTKViewDelegate {
    ....
    @objc dynamic var cgImage: CGImage? {
        didSet {
            if let cgimg = cgImage {
                ciImage = CIImage(cgImage: cgimg)
            }
        }
    }
    var ciImage: CIImage?

    // Metal resources
    var device: MTLDevice!
    var commandQueue: MTLCommandQueue!
    var sourceTexture: MTLTexture!
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    var context: CIContext!
    var textureLoader: MTKTextureLoader!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do view setup here.
        let metalView = MTKView()
        metalView.translatesAutoresizingMaskIntoConstraints = false
        self.imageView.addSubview(metalView)
        NSLayoutConstraint.activate([
            metalView.bottomAnchor.constraint(equalTo: view.bottomAnchor),
            metalView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
            metalView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
            metalView.topAnchor.constraint(equalTo: view.topAnchor)
        ])
        device = MTLCreateSystemDefaultDevice()
        commandQueue = device.makeCommandQueue()
        metalView.delegate = self
        metalView.device = device
        metalView.framebufferOnly = false
        context = CIContext()
        textureLoader = MTKTextureLoader(device: device)
    }

    public func draw(in view: MTKView) {
        if let ciImage = self.ciImage {
            if let currentDrawable = view.currentDrawable {
                let commandBuffer = commandQueue.makeCommandBuffer()
                let inputImage = ciImage
                exposureFilter.setValue(inputImage, forKey: kCIInputImageKey)
                exposureFilter.setValue(ev, forKey: kCIInputEVKey)
                context.render(exposureFilter.outputImage!,
                               to: currentDrawable.texture,
                               commandBuffer: commandBuffer,
                               bounds: CGRect(origin: .zero, size: view.drawableSize),
                               colorSpace: colorSpace)
                commandBuffer?.present(currentDrawable)
                commandBuffer?.commit()
            }
        }
    }
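For the positioning issue mentioned above, one option is to scale and translate the CIImage so that it aspect-fits the drawable before calling context.render. The helper below is only a sketch along those lines; aspectFitted is a hypothetical name, not part of the answer above.

func aspectFitted(_ image: CIImage, in drawableSize: CGSize) -> CIImage {
    // Scale uniformly so the image fits inside the drawable.
    let scale = min(drawableSize.width / image.extent.width,
                    drawableSize.height / image.extent.height)
    let scaled = image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
    // Center the scaled image inside the drawable.
    let dx = (drawableSize.width - scaled.extent.width) / 2 - scaled.extent.origin.x
    let dy = (drawableSize.height - scaled.extent.height) / 2 - scaled.extent.origin.y
    return scaled.transformed(by: CGAffineTransform(translationX: dx, y: dy))
}

In draw(in:) you would then render aspectFitted(exposureFilter.outputImage!, in: view.drawableSize) instead of the raw filter output.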

Related

Filtering Depth Data on iOS 12 appears to be rotated

I am having an issue where the depth data for the .builtInDualCamera appears to be rotated 90 degrees when isFilteringEnabled = true.
Here is my code:
fileprivate let session = AVCaptureSession()
fileprivate let meta = AVCaptureMetadataOutput()
fileprivate let video = AVCaptureVideoDataOutput()
fileprivate let depth = AVCaptureDepthDataOutput()
fileprivate let camera: AVCaptureDevice
fileprivate let input: AVCaptureDeviceInput
fileprivate let synchronizer: AVCaptureDataOutputSynchronizer
init(delegate: CaptureSessionDelegate?) throws {
self.delegate = delegate
session.sessionPreset = .vga640x480
// Setup Camera Input
let discovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: .video, position: .unspecified)
if let device = discovery.devices.first {
camera = device
} else {
throw SessionError.CameraNotAvailable("Unable to load camera")
}
input = try AVCaptureDeviceInput(device: camera)
session.addInput(input)
// Setup Metadata Output (Face)
session.addOutput(meta)
if meta.availableMetadataObjectTypes.contains(AVMetadataObject.ObjectType.face) {
meta.metadataObjectTypes = [ AVMetadataObject.ObjectType.face ]
} else {
print("Can't Setup Metadata: \(meta.availableMetadataObjectTypes)")
}
// Setup Video Output
video.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
session.addOutput(video)
video.connection(with: .video)?.videoOrientation = .portrait
// ****** THE ISSUE IS WITH THIS BLOCK HERE ******
// Setup Depth Output
depth.isFilteringEnabled = true
session.addOutput(depth)
depth.connection(with: .depthData)?.videoOrientation = .portrait
// Setup Synchronizer
synchronizer = AVCaptureDataOutputSynchronizer(dataOutputs: [depth, video, meta])
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = video.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depth.outputRectConverted(fromMetadataOutputRect: outputRect)
// Ratio of the Depth to Video
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
// Set Camera to the framerate of the Depth Data Collection
try camera.lockForConfiguration()
if let fps = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration {
camera.activeVideoMinFrameDuration = fps
}
camera.unlockForConfiguration()
super.init()
synchronizer.setDelegate(self, queue: syncQueue)
}
func dataOutputSynchronizer(_ synchronizer: AVCaptureDataOutputSynchronizer, didOutput data: AVCaptureSynchronizedDataCollection) {
guard let delegate = self.delegate else {
return
}
// Check to see if all the data is actually here
guard
let videoSync = data.synchronizedData(for: video) as? AVCaptureSynchronizedSampleBufferData,
!videoSync.sampleBufferWasDropped,
let depthSync = data.synchronizedData(for: depth) as? AVCaptureSynchronizedDepthData,
!depthSync.depthDataWasDropped
else {
return
}
// It's OK if the face isn't found.
let face: AVMetadataFaceObject?
if let metaSync = data.synchronizedData(for: meta) as? AVCaptureSynchronizedMetadataObjectData {
face = (metaSync.metadataObjects.first { $0 is AVMetadataFaceObject }) as? AVMetadataFaceObject
} else {
face = nil
}
// Convert Buffers to CIImage
let videoImage = convertVideoImage(fromBuffer: videoSync.sampleBuffer)
let depthImage = convertDepthImage(fromData: depthSync.depthData, andFace: face)
// Call Delegate
delegate.captureImages(video: videoImage, depth: depthImage, face: face)
}
fileprivate func convertVideoImage(fromBuffer sampleBuffer: CMSampleBuffer) -> CIImage {
// Convert from "CoreMovie?" to CIImage - fairly straight-forward
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
return image
}
fileprivate func convertDepthImage(fromData depthData: AVDepthData, andFace face: AVMetadataFaceObject?) -> CIImage {
var convertedDepth: AVDepthData
// Convert 16-bit floats up to 32-bit
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32 {
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
} else {
convertedDepth = depthData
}
// Pixel buffer comes straight from depthData
let pixelBuffer = convertedDepth.depthDataMap
let image = CIImage(cvPixelBuffer: pixelBuffer)
return image
}
The original video looks like this (for reference).
When the values are:
// Setup Depth Output
depth.isFilteringEnabled = false
depth.connection(with: .depthData)?.videoOrientation = .portrait
The Image looks like this: (you can see the closer jacket is white, the farther jacket is grey, and the distance is dark grey - as expected)
When the values are:
// Setup Depth Output
depth.isFilteringEnabled = true
depth.connection(with: .depthData)?.videoOrientation = .portrait
The image looks like this: (You can see the color values appear to be in the right places, but the shapes in the smoothing filter appear to be rotated)
When the values are:
// Setup Depth Output
depth.isFilteringEnabled = true
depth.connection(with: .depthData)?.videoOrientation = .landscapeRight
The image looks like this: (Both the colors and the shapes appear to be horizontal)
Am I doing something wrong to get these incorrect values?
I have tried re-ordering the code
// Setup Depth Output
depth.connection(with: .depthData)?.videoOrientation = .portrait
depth.isFilteringEnabled = true
But that does nothing.
I think this is an issue related to iOS 12, because I remember this working just fine under iOS 11 (although I don't have any images saved to prove it)
Any help is appreciated, thanks!
Rather than rotating the image after it has been created (the approach suggested in other answers, which did not work for me), AVDepthData provides a method that applies the orientation correction for you.
The method is depthDataByApplyingExifOrientation:, which returns a new AVDepthData instance with the given orientation applied, i.e. you can create your image in the orientation you want by passing in the appropriate parameter.
This is my helper method that returns a UIImage with the orientation fixed.
- (UIImage *)createDepthMapImageFromCapturePhoto:(AVCapturePhoto *)photo {
    // AVCapturePhoto which has depthData - in Swift you should confirm this exists.
    AVDepthData *frontDepthData = [photo depthData];
    // Overwrite the instance with the correct orientation applied.
    frontDepthData = [frontDepthData depthDataByApplyingExifOrientation:kCGImagePropertyOrientationRight];
    // Create the CIImage from the depth data using the available method.
    CIImage *ciDepthImage = [CIImage imageWithDepthData:frontDepthData];
    // Create a CIContext, which enables converting the CIImage to a CGImage.
    CIContext *context = [[CIContext alloc] init];
    // Create the CGImage.
    CGImageRef img = [context createCGImage:ciDepthImage fromRect:[ciDepthImage extent]];
    // Create the final image.
    UIImage *depthImage = [UIImage imageWithCGImage:img];
    // createCGImage follows the Create Rule, so release the CGImageRef to avoid leaking it.
    CGImageRelease(img);
    // Return the depth image.
    return depthImage;
}
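For Swift callers, the same idea looks roughly like the sketch below. It is an illustration only: it assumes the AVCapturePhoto actually carries depth data, depthMapImage is a hypothetical helper name, and applyingExifOrientation(_:) is the Swift name of the method used above.

import AVFoundation
import UIKit

func depthMapImage(from photo: AVCapturePhoto) -> UIImage? {
    // Confirm the photo actually has depth data attached.
    guard let depthData = photo.depthData else { return nil }
    // Apply the desired EXIF orientation before creating the image.
    let oriented = depthData.applyingExifOrientation(.right)
    // CIImage(depthData:) is failable, so unwrap it.
    guard let ciDepthImage = CIImage(depthData: oriented) else { return nil }
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciDepthImage, from: ciDepthImage.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}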

Render a MTIImage

Please don't judge me, I'm just learning Swift.
Recently I installed the MetalPetal framework and followed the instructions:
https://github.com/MetalPetal/MetalPetal#example-code
But I get an error because of MTIContext. Maybe I have to declare something more for MetalPetal?
My Code:
import UIKit
import MetalPetal
import CoreGraphics
class ViewController: UIViewController {
#IBOutlet weak var image1: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
weak var image: UIImage?
image = image1.image
var ciImage = CIImage(image: image!)
var cgImage1 = convertCIImageToCGImage(inputImage: ciImage!)
let imageFromCGImage = MTIImage(cgImage: cgImage1!)
let inputImage = imageFromCGImage
let filter = MTISaturationFilter()
filter.saturation = 1
filter.inputImage = inputImage
let outputImage = filter.outputImage
let context = MTIContext()
do {
try context.render(outputImage, to: pixelBuffer)
var image3: CIImage? = try context.makeCIImage(from: outputImage!)
//context.makeCIImage(from: image)
//context.makeCGImage(from: image)
} catch {
print(error)
}
// Do any additional setup after loading the view, typically from a nib.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func convertCIImageToCGImage(inputImage: CIImage) -> CGImage? {
let context = CIContext(options: nil)
if let cgImage = context.createCGImage(inputImage, from: inputImage.extent) {
return cgImage
}
return nil
}
}
@YuAo
Input Image
A UIImage is backed by either an underlying Quartz image (which can be retrieved with cgImage) or an underlying Core Image (which can be retrieved with ciImage).
MTIImage offers constructors for both types.
MTIContext
An MTIContext must be initialized with a device, which can be obtained by calling MTLCreateSystemDefaultDevice().
Rendering
Rendering to a pixel buffer is not needed. We can get the result by calling makeCGImage.
Test
I've taken your source code above and adapted it slightly based on the points mentioned.
I also added a second UIImageView to see the result of the filtering, and changed the saturation to 0 to verify that the filter works.
If the GPU or shaders are involved, it makes sense to test on a real device rather than on the simulator.
The result looks like this:
In the upper area you see the original JPEG; in the lower area the filter is applied.
Swift
The simplified Swift code that produces this result looks like this:
override func viewDidLoad() {
    super.viewDidLoad()

    guard let image = UIImage(named: "regensburg.jpg") else { return }
    guard let cgImage = image.cgImage else { return }
    imageView1.image = image

    let filter = MTISaturationFilter()
    filter.saturation = 0
    filter.inputImage = MTIImage(cgImage: cgImage)

    if let device = MTLCreateSystemDefaultDevice(),
       let outputImage = filter.outputImage {
        do {
            let context = try MTIContext(device: device)
            let filteredImage = try context.makeCGImage(from: outputImage)
            imageView2.image = UIImage(cgImage: filteredImage)
        } catch {
            print(error)
        }
    }
}
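One small follow-up that is not part of the answer above: like a CIContext, an MTIContext is comparatively expensive to set up, so it is usually created once and reused rather than rebuilt on every render. A minimal sketch, using only the calls already shown (sharedContext is a hypothetical name):

// Created lazily once and shared; nil if Metal is unavailable (e.g. on some simulators).
let sharedContext: MTIContext? = {
    guard let device = MTLCreateSystemDefaultDevice() else { return nil }
    return try? MTIContext(device: device)
}()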

Simple CIFilter Passthru with CGImage conversion returns black pixels

The following code:
let skView = SKView()
let scene = SKScene()

override func viewDidLoad() {
    super.viewDidLoad()
    self.scene.scaleMode = .resizeFill
    self.skView.presentScene(self.scene)
    self.scene.backgroundColor = UIColor.black
    self.view.addSubview(skView)
    self.scene.shouldEnableEffects = true

    let sprite = SKSpriteNode(imageNamed: "NAME_THAT_PIC")
    sprite.position = CGPoint(x: 300, y: 400)
    let effectNode = SKEffectNode()
    effectNode.filter = MyFilter()
    effectNode.addChild(sprite)
will call this custom filter, which does nothing but create a CGImage from a CIImage, correctly invoking context.createCGImage() as reported by many people (CIImages are not backed by pixel buffers).
MyFilter is reduced to a simple repro test:
class MyFilter: CIFilter {
    var inputImage: CIImage?
    var inputImageRect: CGRect? {
        guard let image = self.inputImage else {
            return nil
        }
        return image.extent
    }

    public override init() {
        super.init()
    }

    required public init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override open var outputImage: CIImage? {
        guard let inputImage = self.inputImage else {
            return nil
        }
        let context = CIContext(options: nil)
        let cgImage = context.createCGImage(inputImage, from: inputImageRect!)
        // ... DO SOMETHING WITH CGIMAGE DATA ...
        return CIImage(cgImage: cgImage!)
    }
}
If I replace MyFilter() with another built-in filter, it works and shows the altered image, so the view controller code works. If instead I return inputImage directly from the filter's outputImage, it also works and the image passed in is displayed.
When I dump the CGImage, the dimensions of the image are correct, but every pixel is set to black.
I tried creating a UIImage using UIImage(cgImage: cgImage!), but the same thing happens.
What is causing pixels not to be loaded into the cgImage I generate from the inputImage?

Toggle flash in ios swift

I am building an image classifier app. On the camera screen I have a switch button which I want to use to toggle the flash, so that the user can switch on the flash in low light.
Here is my code:
import UIKit
import AVFoundation
import Vision
// controlling the pace of the machine vision analysis
var lastAnalysis: TimeInterval = 0
var pace: TimeInterval = 0.33 // in seconds, classification will not repeat faster than this value
// performance tracking
let trackPerformance = false // use "true" for performance logging
var frameCount = 0
let framesPerSample = 10
var startDate = NSDate.timeIntervalSinceReferenceDate
var flash=0
class ImageDetectionViewController: UIViewController {
var callBackImageDetection :(State)->Void = { state in
}
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var stackView: UIStackView!
@IBOutlet weak var lowerView: UIView!
@IBAction func swithch(_ sender: UISwitch) {
if(sender.isOn == true)
{
stopActiveSession();
let captureSession=AVCaptureSession()
let captureDevice: AVCaptureDevice?
setupCamera(flash: 1)
}
}
var previewLayer: AVCaptureVideoPreviewLayer!
let bubbleLayer = BubbleLayer(string: "")
let queue = DispatchQueue(label: "videoQueue")
var captureSession = AVCaptureSession()
var captureDevice: AVCaptureDevice?
let videoOutput = AVCaptureVideoDataOutput()
var unknownCounter = 0 // used to track how many unclassified images in a row
let confidence: Float = 0.8
// MARK: Load the Model
let targetImageSize = CGSize(width: 227, height: 227) // must match model data input
lazy var classificationRequest: [VNRequest] = {
do {
// Load the Custom Vision model.
// To add a new model, drag it to the Xcode project browser making sure that the "Target Membership" is checked.
// Then update the following line with the name of your new model.
// let model = try VNCoreMLModel(for: Fruit().model)
let model = try VNCoreMLModel(for: CodigocubeAI().model)
let classificationRequest = VNCoreMLRequest(model: model, completionHandler: self.handleClassification)
return [ classificationRequest ]
} catch {
fatalError("Can't load Vision ML model: \(error)")
}
}()
// MARK: Handle image classification results
func handleClassification(request: VNRequest, error: Error?) {
guard let observations = request.results as? [VNClassificationObservation]
else { fatalError("unexpected result type from VNCoreMLRequest") }
guard let best = observations.first else {
fatalError("classification didn't return any results")
}
// Use results to update user interface (includes basic filtering)
print("\(best.identifier): \(best.confidence)")
if best.identifier.starts(with: "Unknown") || best.confidence < confidence {
if self.unknownCounter < 3 { // a bit of a low-pass filter to avoid flickering
self.unknownCounter += 1
} else {
self.unknownCounter = 0
DispatchQueue.main.async {
self.bubbleLayer.string = nil
}
}
} else {
self.unknownCounter = 0
DispatchQueue.main.async {[weak self] in
guard let strongSelf = self
else
{
return
}
// Trimming labels because they sometimes have unexpected line endings which show up in the GUI
let identifierString = best.identifier.trimmingCharacters(in: CharacterSet.whitespacesAndNewlines)
strongSelf.bubbleLayer.string = identifierString
let state : State = strongSelf.getState(identifierStr: identifierString)
strongSelf.stopActiveSession()
strongSelf.navigationController?.popViewController(animated: true)
strongSelf.callBackImageDetection(state)
}
}
}
func getState(identifierStr:String)->State
{
var state :State = .none
if identifierStr == "entertainment"
{
state = .entertainment
}
else if identifierStr == "geography"
{
state = .geography
}
else if identifierStr == "history"
{
state = .history
}
else if identifierStr == "knowledge"
{
state = .education
}
else if identifierStr == "science"
{
state = .science
}
else if identifierStr == "sports"
{
state = .sports
}
else
{
state = .none
}
return state
}
// MARK: Lifecycle
override func viewDidLoad() {
super.viewDidLoad()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewView.layer.addSublayer(previewLayer)
}
override func viewDidAppear(_ animated: Bool) {
self.edgesForExtendedLayout = UIRectEdge.init(rawValue: 0)
bubbleLayer.opacity = 0.0
bubbleLayer.position.x = self.view.frame.width / 2.0
bubbleLayer.position.y = lowerView.frame.height / 2
lowerView.layer.addSublayer(bubbleLayer)
setupCamera(flash:2)
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
previewLayer.frame = previewView.bounds;
}
// MARK: Camera handling
func setupCamera(flash :Int) {
let deviceDiscovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back)
if let device = deviceDiscovery.devices.last {
if(flash == 1)
{
if (device.hasTorch) {
do {
try device.lockForConfiguration()
if (device.isTorchAvailable) {
do {
try device.setTorchModeOn(level:0.2 )
}
catch
{
print(error)
}
device.unlockForConfiguration()
}
}
catch
{
print(error)
}
}
}
captureDevice = device
beginSession()
}
}
func beginSession() {
do {
videoOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String) : (NSNumber(value: kCVPixelFormatType_32BGRA) as! UInt32)]
videoOutput.alwaysDiscardsLateVideoFrames = true
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.sessionPreset = .hd1920x1080
captureSession.addOutput(videoOutput)
let input = try AVCaptureDeviceInput(device: captureDevice!)
captureSession.addInput(input)
captureSession.startRunning()
} catch {
print("error connecting to capture device")
}
}
func stopActiveSession()
{
if captureSession.isRunning == true
{
captureSession.stopRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
self.stopActiveSession()
}
deinit {
print("deinit called")
}
}
// MARK: Video Data Delegate
extension ImageDetectionViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
// called for each frame of video
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let currentDate = NSDate.timeIntervalSinceReferenceDate
// control the pace of the machine vision to protect battery life
if currentDate - lastAnalysis >= pace {
lastAnalysis = currentDate
} else {
return // don't run the classifier more often than we need
}
// keep track of performance and log the frame rate
if trackPerformance {
frameCount = frameCount + 1
if frameCount % framesPerSample == 0 {
let diff = currentDate - startDate
if (diff > 0) {
if pace > 0.0 {
print("WARNING: Frame rate of image classification is being limited by \"pace\" setting. Set to 0.0 for fastest possible rate.")
}
print("\(String.localizedStringWithFormat("%0.2f", (diff/Double(framesPerSample))))s per frame (average)")
}
startDate = currentDate
}
}
// Crop and resize the image data.
// Note, this uses a Core Image pipeline that could be appended with other pre-processing.
// If we don't want to do anything custom, we can remove this step and let the Vision framework handle
// crop and resize as long as we are careful to pass the orientation properly.
guard let croppedBuffer = croppedSampleBuffer(sampleBuffer, targetSize: targetImageSize) else {
return
}
do {
let classifierRequestHandler = VNImageRequestHandler(cvPixelBuffer: croppedBuffer, options: [:])
try classifierRequestHandler.perform(classificationRequest)
} catch {
print(error)
}
}
}
let context = CIContext()
var rotateTransform: CGAffineTransform?
var scaleTransform: CGAffineTransform?
var cropTransform: CGAffineTransform?
var resultBuffer: CVPixelBuffer?
func croppedSampleBuffer(_ sampleBuffer: CMSampleBuffer, targetSize: CGSize) -> CVPixelBuffer? {
guard let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
fatalError("Can't convert to CVImageBuffer.")
}
// Only doing these calculations once for efficiency.
// If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
if rotateTransform == nil {
let imageSize = CVImageBufferGetEncodedSize(imageBuffer)
let rotatedSize = CGSize(width: imageSize.height, height: imageSize.width)
guard targetSize.width < rotatedSize.width, targetSize.height < rotatedSize.height else {
fatalError("Captured image is smaller than image size for model.")
}
let shorterSize = (rotatedSize.width < rotatedSize.height) ? rotatedSize.width : rotatedSize.height
rotateTransform = CGAffineTransform(translationX: imageSize.width / 2.0, y: imageSize.height / 2.0).rotated(by: -CGFloat.pi / 2.0).translatedBy(x: -imageSize.height / 2.0, y: -imageSize.width / 2.0)
let scale = targetSize.width / shorterSize
scaleTransform = CGAffineTransform(scaleX: scale, y: scale)
// Crop input image to output size
let xDiff = rotatedSize.width * scale - targetSize.width
let yDiff = rotatedSize.height * scale - targetSize.height
cropTransform = CGAffineTransform(translationX: xDiff/2.0, y: yDiff/2.0)
}
// Convert to CIImage because it is easier to manipulate
let ciImage = CIImage(cvImageBuffer: imageBuffer)
let rotated = ciImage.transformed(by: rotateTransform!)
let scaled = rotated.transformed(by: scaleTransform!)
let cropped = scaled.transformed(by: cropTransform!)
// Note that the above pipeline could be easily appended with other image manipulations.
// For example, to change the image contrast. It would be most efficient to handle all of
// the image manipulation in a single Core Image pipeline because it can be hardware optimized.
// Only need to create this buffer one time and then we can reuse it for every frame
if resultBuffer == nil {
let result = CVPixelBufferCreate(kCFAllocatorDefault, Int(targetSize.width), Int(targetSize.height), kCVPixelFormatType_32BGRA, nil, &resultBuffer)
guard result == kCVReturnSuccess else {
fatalError("Can't allocate pixel buffer.")
}
}
// Render the Core Image pipeline to the buffer
context.render(cropped, to: resultBuffer!)
// For debugging
// let image = imageBufferToUIImage(resultBuffer!)
// print(image.size) // set breakpoint to see image being provided to CoreML
return resultBuffer
}
// Only used for debugging.
// Turns an image buffer into a UIImage that is easier to display in the UI or debugger.
func imageBufferToUIImage(_ imageBuffer: CVImageBuffer) -> UIImage {
CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
let width = CVPixelBufferGetWidth(imageBuffer)
let height = CVPixelBufferGetHeight(imageBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
let quartzImage = context!.makeImage()
CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
let image = UIImage(cgImage: quartzImage!, scale: 1.0, orientation: .right)
return image
}
I am getting the error 'An AVCaptureOutput instance may not be added to more than one session'.
Now I want to give the user the ability to toggle the flash. How do I destroy the active camera session and open a new one with the flash on?
Can anyone help me, or suggest any other way to achieve this?
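The error indicates that videoOutput is being added again while it still belongs to the existing session. One alternative, shown as a sketch only and not verified against this project, is to leave the running session alone and just toggle the torch on the already-configured captureDevice from the switch action (this reuses the swithch action and the 0.2 torch level from the code above):

@IBAction func swithch(_ sender: UISwitch) {
    // Reuse the device configured in setupCamera(flash:) instead of rebuilding the session.
    guard let device = captureDevice, device.hasTorch, device.isTorchAvailable else { return }
    do {
        try device.lockForConfiguration()
        if sender.isOn {
            // Any level up to AVCaptureDevice.maxAvailableTorchLevel works here.
            try device.setTorchModeOn(level: 0.2)
        } else {
            device.torchMode = .off
        }
        device.unlockForConfiguration()
    } catch {
        print(error)
    }
}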

Is there a way to check if a UIImage has been decompressed?

I'm sure most of you have dealt with forced decompression on a background thread to enhance rendering performance. My question is whether there is a way to check if an image has been decompressed.
The technique below helped me check whether an image has been decompressed or not. The code is simple to understand:
import UIKit

class ViewController: UIViewController {
    var compressedImage: NSString?
    var decompressedImage: NSString?

    override func viewDidLoad() {
        super.viewDidLoad()
        let image = compressImage()
        var imageView = UIImageView(image: image)
        //self.view.addSubview(imageView)
        let decompressImage = deCompressImage(image: image)
        let imageData = Data(UIImagePNGRepresentation(decompressImage)!)
        print("***** Size after decompressed \(imageData.description) **** ")
        imageView = UIImageView(image: decompressImage)
        decompressedImage = imageData.description as NSString?
        let decompressed = checkImageBeenDecompressed(decompressedImage: decompressedImage!, compressedImage: compressedImage!)
        print(decompressed)
        //self.view.addSubview(imageView)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func checkImageBeenDecompressed(decompressedImage: NSString, compressedImage: NSString) -> Bool {
        let decompressedSize = Int(decompressedImage.getNumFromString()!)
        let compressedSize = Int(compressedImage.getNumFromString()!)
        if decompressedSize! > compressedSize! {
            print("Image has been decompressed")
            return true
        }
        print("Image has not been decompressed")
        return false
    }

    func compressImage() -> UIImage {
        let oldImage = UIImage(named: "background.jpg")
        var imageData = Data(UIImagePNGRepresentation(oldImage!)!)
        print("***** Original Uncompressed Size \(imageData.description) **** ")
        imageData = UIImageJPEGRepresentation(oldImage!, 0.025)!
        print("***** Compressed Size \(imageData.description) **** ")
        compressedImage = imageData.description as NSString?
        let image = UIImage(data: imageData)
        return image!
    }

    func deCompressImage(image: UIImage) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(image.size, true, 0)
        image.draw(at: CGPoint.zero)
        let decompressedImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return decompressedImage!
    }
}

extension NSString {
    func getNumFromString() -> String? {
        var numberString: NSString?
        let thisScanner = Scanner(string: self as String)
        let numbers = NSCharacterSet(charactersIn: "0123456789")
        thisScanner.scanUpToCharacters(from: numbers as CharacterSet, into: nil)
        thisScanner.scanCharacters(from: numbers as CharacterSet, into: &numberString)
        return numberString as? String
    }
}
Demo Reference
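As a side note on the forced decompression the question mentions, here is a minimal sketch of doing that work off the main thread with UIGraphicsImageRenderer (iOS 10+). This is an illustration only, not part of the answer above, and decompressInBackground is a hypothetical helper name:

import UIKit

func decompressInBackground(_ image: UIImage, completion: @escaping (UIImage) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        // Drawing the image forces the underlying bitmap to be decoded.
        let renderer = UIGraphicsImageRenderer(size: image.size)
        let decoded = renderer.image { _ in
            image.draw(at: .zero)
        }
        DispatchQueue.main.async { completion(decoded) }
    }
}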
