Is there a way to check if a UIImage has been decompressed?

I'm sure most of you have dealt with forced decompression on a background thread to enhance rendering performance. My question is whether there is a way to check if an image has been decompressed.

The following technique helped me check whether an image has been decompressed. It is simple code to understand:
import UIKit
class ViewController: UIViewController {
var compressedImage:NSString?
var decompressedImage:NSString?
override func viewDidLoad() {
super.viewDidLoad()
let image = compressImage()
var imageView = UIImageView(image: image)
//self.view.addSubview(imageView)
let decompressImage = deCompressImage(image: image)
let imageData = Data(UIImagePNGRepresentation(decompressImage)! )
print("***** Size after decompred \(imageData.description) **** ")
imageView = UIImageView(image: decompressImage)
decompressedImage = imageData.description as NSString?
let decompressed = checkImageBeenDecompressed(decompressedImage: decompressedImage!, compressedImage: compressedImage!)
print(decompressed)
//self.view.addSubview(imageView)
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func checkImageBeenDecompressed(decompressedImage:NSString , compressedImage:NSString) -> Bool {
let decompressedSize = Int( decompressedImage.getNumFromString()! )
let compressedSize = Int (compressedImage.getNumFromString()! )
if( decompressedSize! > compressedSize! ) {
print("Image has been decompressed")
return true
}
print("Image has not been decompressed")
return false
}
func compressImage() -> UIImage {
let oldImage = UIImage(named: "background.jpg")
var imageData = Data(UIImagePNGRepresentation(oldImage!)! )
print("***** Original Uncompressed Size \(imageData.description) **** ")
imageData = UIImageJPEGRepresentation(oldImage!, 0.025)!
print("***** Compressed Size \(imageData.description) **** ")
compressedImage = imageData.description as NSString?
let image = UIImage(data: imageData)
return image!
}
func deCompressImage(image:UIImage) -> UIImage {
UIGraphicsBeginImageContextWithOptions(image.size, true, 0)
image.draw(at: CGPoint.zero)
let decompressedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return decompressedImage!
}
}
extension NSString {
func getNumFromString() -> String? {
var numberString: NSString?
let thisScanner = Scanner(string: self as String)
let numbers = NSCharacterSet(charactersIn: "0123456789")
thisScanner.scanUpToCharacters(from: numbers as CharacterSet, into: nil)
thisScanner.scanCharacters(from: numbers as CharacterSet, into: &numberString)
return numberString as? String;
}
}
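For reference, the usual goal is not to check afterwards but to force decompression up front: draw the image once on a background queue so it is already decoded by the time UIKit displays it. A minimal sketch of that approach (the helper name is mine, using UIGraphicsImageRenderer):
// Sketch: rasterize the image once off the main thread; the returned copy is
// already decompressed, so displaying it later causes no decode work on the main thread.
func forceDecompress(_ image: UIImage, completion: @escaping (UIImage) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let renderer = UIGraphicsImageRenderer(size: image.size)
        let decoded = renderer.image { _ in
            image.draw(at: .zero)
        }
        DispatchQueue.main.async { completion(decoded) }
    }
}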

Related

How do you apply Core Image filters to an onscreen image using Swift/MacOS or iOS and Core Image

Photos' editing adjustments provide a realtime view of the adjustments as they are applied. I wasn't able to find any samples of how you do this. All the examples seem to show that you apply the filters through a pipeline of sorts and then take the resulting image and update the screen with the result. See the code below.
Photos seems to show the adjustment applied to the onscreen image. How do they achieve this?
func editImage(inputImage: CGImage) {
DispatchQueue.global().async {
let beginImage = CIImage(cgImage: inputImage)
guard let exposureOutput = self.exposureFilter(beginImage, ev: self.brightness) else {
return
}
guard let vibranceOutput = self.vibranceFilter(exposureOutput, amount: self.vibranceAmount) else {
return
}
guard let unsharpMaskOutput = self.unsharpMaskFilter(vibranceOutput, intensity: self.unsharpMaskIntensity, radius: self.unsharpMaskRadius) else {
return
}
guard let sharpnessOutput = self.sharpenFilter(unsharpMaskOutput, sharpness: self.unsharpMaskIntensity) else {
return
}
if let cgimg = self.context.createCGImage(sharpnessOutput, from: vibranceOutput.extent) {
DispatchQueue.main.async {
self.cgImage = cgimg
}
}
}
}
OK, I just found the answer - use MTKView, which is working fine except for getting the image to fill the view correctly!
For the benefit of others here are the basics... I have yet to figure out how to position the image correctly in the view - but I can see the filter applied in realtime!
class ViewController: NSViewController, MTKViewDelegate {
....
@objc dynamic var cgImage: CGImage? {
didSet {
if let cgimg = cgImage {
ciImage = CIImage(cgImage: cgimg)
}
}
}
var ciImage: CIImage?
// Metal resources
var device: MTLDevice!
var commandQueue: MTLCommandQueue!
var sourceTexture: MTLTexture! // 2
let colorSpace = CGColorSpaceCreateDeviceRGB()
var context: CIContext!
var textureLoader: MTKTextureLoader!
override func viewDidLoad() {
super.viewDidLoad()
// Do view setup here.
let metalView = MTKView()
metalView.translatesAutoresizingMaskIntoConstraints = false
self.imageView.addSubview(metalView)
NSLayoutConstraint.activate([
metalView.bottomAnchor.constraint(equalTo: view.bottomAnchor),
metalView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
metalView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
metalView.topAnchor.constraint(equalTo: view.topAnchor)
])
device = MTLCreateSystemDefaultDevice()
commandQueue = device.makeCommandQueue()
metalView.delegate = self
metalView.device = device
metalView.framebufferOnly = false
context = CIContext()
textureLoader = MTKTextureLoader(device: device)
}
public func draw(in view: MTKView) {
if let ciImage = self.ciImage {
if let currentDrawable = view.currentDrawable {
let commandBuffer = commandQueue.makeCommandBuffer()
let inputImage = ciImage // 2
exposureFilter.setValue(inputImage, forKey: kCIInputImageKey)
exposureFilter.setValue(ev, forKey: kCIInputEVKey)
context.render(exposureFilter.outputImage!,
to: currentDrawable.texture,
commandBuffer: commandBuffer,
bounds: CGRect(origin: .zero, size: view.drawableSize),
colorSpace: colorSpace)
commandBuffer?.present(currentDrawable)
commandBuffer?.commit()
}
}
}
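One way to get the image to fill the view correctly (the open point above) is to scale and center the CIImage to the drawable size before rendering it. A minimal sketch, assuming the same ciImage and drawableSize used in draw(in:) above:
// Sketch: fit `image` into `drawableSize`, preserving aspect ratio and centering it.
// Apply this to the filter output before context.render(_:to:commandBuffer:bounds:colorSpace:).
func fitted(_ image: CIImage, into drawableSize: CGSize) -> CIImage {
    let scale = min(drawableSize.width / image.extent.width,
                    drawableSize.height / image.extent.height)
    let scaled = image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
    let dx = (drawableSize.width - scaled.extent.width) / 2 - scaled.extent.origin.x
    let dy = (drawableSize.height - scaled.extent.height) / 2 - scaled.extent.origin.y
    return scaled.transformed(by: CGAffineTransform(translationX: dx, y: dy))
}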

Render a MTIImage

Please don't judge me, I'm just learning Swift.
Recently I installed the MetalPetal framework and I followed the instructions:
https://github.com/MetalPetal/MetalPetal#example-code
But I get an error because of MTIContext. Maybe I have to declare something more for MetalPetal?
My Code:
import UIKit
import MetalPetal
import CoreGraphics
class ViewController: UIViewController {
@IBOutlet weak var image1: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
weak var image: UIImage?
image = image1.image
var ciImage = CIImage(image: image!)
var cgImage1 = convertCIImageToCGImage(inputImage: ciImage!)
let imageFromCGImage = MTIImage(cgImage: cgImage1!)
let inputImage = imageFromCGImage
let filter = MTISaturationFilter()
filter.saturation = 1
filter.inputImage = inputImage
let outputImage = filter.outputImage
let context = MTIContext()
do {
try context.render(outputImage, to: pixelBuffer)
var image3: CIImage? = try context.makeCIImage(from: outputImage!)
//context.makeCIImage(from: image)
//context.makeCGImage(from: image)
} catch {
print(error)
}
// Do any additional setup after loading the view, typically from a nib.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func convertCIImageToCGImage(inputImage: CIImage) -> CGImage? {
let context = CIContext(options: nil)
if let cgImage = context.createCGImage(inputImage, from: inputImage.extent) {
return cgImage
}
return nil
}
}
@YuAo
Input Image
A UIImage is backed by either an underlying Quartz image (which can be retrieved with cgImage) or an underlying Core Image image (which can be retrieved with ciImage).
MTIImage offers constructors for both types.
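A small sketch of that (uiImage and filter are placeholders here; the cgImage initializer matches the answer code below, while the ciImage label is assumed from the statement above, so check MetalPetal's headers for the exact signature):
// Sketch: build the filter input from whichever backing the UIImage actually has.
let filter = MTISaturationFilter()
if let cgImage = uiImage.cgImage {
    filter.inputImage = MTIImage(cgImage: cgImage)
} else if let ciImage = uiImage.ciImage {
    filter.inputImage = MTIImage(ciImage: ciImage)
}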
MTIContext
An MTIContext must be initialized with a device, which can be retrieved by calling MTLCreateSystemDefaultDevice().
Rendering
Rendering to a pixel buffer is not needed. We can get the result by calling makeCGImage.
Test
I've taken your source code above and adapted it slightly to the points mentioned.
I also added a second UIImageView to see the result of the filtering, and changed the saturation to 0 to check that the filter works.
If the GPU or shaders are involved, it makes sense to test on a real device and not on the simulator.
The result looks like this:
In the upper area you see the original jpg, in the lower area the filter is applied.
Swift
The simplified Swift code that produces this result looks like this:
override func viewDidLoad() {
super.viewDidLoad()
guard let image = UIImage(named: "regensburg.jpg") else { return }
guard let cgImage = image.cgImage else { return }
imageView1.image = image
let filter = MTISaturationFilter()
filter.saturation = 0
filter.inputImage = MTIImage(cgImage: cgImage)
if let device = MTLCreateSystemDefaultDevice(),
let outputImage = filter.outputImage {
do {
let context = try MTIContext(device: device)
let filteredImage = try context.makeCGImage(from: outputImage)
imageView2.image = UIImage(cgImage: filteredImage)
} catch {
print(error)
}
}
}

Reduce memory consumption while loading gif images in UIImageView

I want to show a gif image in a UIImageView, and with the code below (source: https://iosdevcenters.blogspot.com/2016/08/load-gif-image-in-swift_22.html; I did not understand all of the code) I am able to display gif images. However, the memory consumption seems high (tested on a real device). Is there any way to modify the code below to reduce the memory consumption?
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
let url = "https://cdn-images-1.medium.com/max/800/1*oDqXedYUMyhWzN48pUjHyw.gif"
let gifImage = UIImage.gifImageWithURL(url)
imageView.image = gifImage
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
fileprivate func < <T : Comparable>(lhs: T?, rhs: T?) -> Bool {
switch (lhs, rhs) {
case let (l?, r?):
return l < r
case (nil, _?):
return true
default:
return false
}
}
extension UIImage {
public class func gifImageWithData(_ data: Data) -> UIImage? {
guard let source = CGImageSourceCreateWithData(data as CFData, nil) else {
print("image doesn't exist")
return nil
}
return UIImage.animatedImageWithSource(source)
}
public class func gifImageWithURL(_ gifUrl:String) -> UIImage? {
guard let bundleURL:URL? = URL(string: gifUrl) else {
return nil
}
guard let imageData = try? Data(contentsOf: bundleURL!) else {
return nil
}
return gifImageWithData(imageData)
}
public class func gifImageWithName(_ name: String) -> UIImage? {
guard let bundleURL = Bundle.main
.url(forResource: name, withExtension: "gif") else {
return nil
}
guard let imageData = try? Data(contentsOf: bundleURL) else {
return nil
}
return gifImageWithData(imageData)
}
class func delayForImageAtIndex(_ index: Int, source: CGImageSource!) -> Double {
var delay = 0.1
let cfProperties = CGImageSourceCopyPropertiesAtIndex(source, index, nil)
let gifProperties: CFDictionary = unsafeBitCast(
CFDictionaryGetValue(cfProperties,
Unmanaged.passUnretained(kCGImagePropertyGIFDictionary).toOpaque()),
to: CFDictionary.self)
var delayObject: AnyObject = unsafeBitCast(
CFDictionaryGetValue(gifProperties,
Unmanaged.passUnretained(kCGImagePropertyGIFUnclampedDelayTime).toOpaque()),
to: AnyObject.self)
if delayObject.doubleValue == 0 {
delayObject = unsafeBitCast(CFDictionaryGetValue(gifProperties,
Unmanaged.passUnretained(kCGImagePropertyGIFDelayTime).toOpaque()), to: AnyObject.self)
}
delay = delayObject as! Double
if delay < 0.1 {
delay = 0.1
}
return delay
}
class func gcdForPair(_ a: Int?, _ b: Int?) -> Int {
var a = a
var b = b
if b == nil || a == nil {
if b != nil {
return b!
} else if a != nil {
return a!
} else {
return 0
}
}
if a < b {
let c = a
a = b
b = c
}
var rest: Int
while true {
rest = a! % b!
if rest == 0 {
return b!
} else {
a = b
b = rest
}
}
}
class func gcdForArray(_ array: Array<Int>) -> Int {
if array.isEmpty {
return 1
}
var gcd = array[0]
for val in array {
gcd = UIImage.gcdForPair(val, gcd)
}
return gcd
}
class func animatedImageWithSource(_ source: CGImageSource) -> UIImage? {
let count = CGImageSourceGetCount(source)
var images = [CGImage]()
var delays = [Int]()
for i in 0..<count {
if let image = CGImageSourceCreateImageAtIndex(source, i, nil) {
images.append(image)
}
let delaySeconds = UIImage.delayForImageAtIndex(Int(i),
source: source)
delays.append(Int(delaySeconds * 1000.0)) // Seconds to ms
}
let duration: Int = {
var sum = 0
for val: Int in delays {
sum += val
}
return sum
}()
let gcd = gcdForArray(delays)
var frames = [UIImage]()
var frame: UIImage
var frameCount: Int
for i in 0..<count {
frame = UIImage(cgImage: images[Int(i)])
frameCount = Int(delays[Int(i)] / gcd)
for _ in 0..<frameCount {
frames.append(frame)
}
}
let animation = UIImage.animatedImage(with: frames,
duration: Double(duration) / 1000.0)
return animation
}
}
When I render the image as a normal PNG image, the consumption is around 10 MB.
The GIF in question has a resolution of 480×288 and contains 10 frames.
Considering that UIImageView stores frames as 4-byte RGBA, this GIF occupies 4 × 10 × 480 × 288 = 5 529 600 bytes in RAM, which is more than 5 megabytes.
There are numerous ways to mitigate that, but only one of them puts no additional strain on the CPU; the others are mere CPU-to-RAM trade-offs.
The method I'm talking about is subclassing UIImageView and loading your GIFs by hand, preserving their internal representation (indexed image + palette). It would allow you to cut the memory usage fourfold.
N.B.: even though GIFs may store a full image for each frame (which is the case for the GIF in question), many do not; instead, most frames contain only the pixels that have changed since the previous one. Thus, in general, the internal GIF representation only allows frames to be displayed in order.
Other methods of saving RAM include e.g. re-reading every frame from disk prior to displaying it, which is certainly not good for battery life.
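A related CPU-for-RAM trade-off is to decode each frame downscaled with ImageIO's thumbnail options instead of CGImageSourceCreateImageAtIndex, so the frames kept for the animated UIImage are smaller. A rough sketch (maxPixelSize is a value you choose):
// Sketch: decode frame `index` of `source` scaled so its longer side is at most `maxPixelSize`.
func downscaledFrame(from source: CGImageSource, at index: Int, maxPixelSize: Int) -> UIImage? {
    let options: [CFString: Any] = [
        kCGImageSourceCreateThumbnailFromImageAlways: true,
        kCGImageSourceCreateThumbnailWithTransform: true,
        kCGImageSourceThumbnailMaxPixelSize: maxPixelSize
    ]
    guard let cgImage = CGImageSourceCreateThumbnailAtIndex(source, index, options as CFDictionary) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}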
To display GIFs with less memory consumption, try BBWebImage.
BBWebImage decides how many image frames are decoded and cached depending on current memory usage. If free memory is not enough, only some of the image frames are decoded and cached.
For Swift 4:
// BBAnimatedImageView (subclass UIImageView) displays animated image
imageView = BBAnimatedImageView(frame: frame)
// Load and display gif
imageView.bb_setImage(with: url,
placeholder: UIImage(named: "placeholder"))
{ (image: UIImage?, data: Data?, error: Error?, cacheType: BBImageCacheType) in
// Do something when finish loading
}

Toggle flash in ios swift

I am building an image classifier app. On the camera screen I have a switch that I want to use to toggle the flash, so that the user can switch on the flash in low light.
Here is my code:
import UIKit
import AVFoundation
import Vision
// controlling the pace of the machine vision analysis
var lastAnalysis: TimeInterval = 0
var pace: TimeInterval = 0.33 // in seconds, classification will not repeat faster than this value
// performance tracking
let trackPerformance = false // use "true" for performance logging
var frameCount = 0
let framesPerSample = 10
var startDate = NSDate.timeIntervalSinceReferenceDate
var flash=0
class ImageDetectionViewController: UIViewController {
var callBackImageDetection :(State)->Void = { state in
}
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var stackView: UIStackView!
@IBOutlet weak var lowerView: UIView!
@IBAction func swithch(_ sender: UISwitch) {
if(sender.isOn == true)
{
stopActiveSession();
let captureSession=AVCaptureSession()
let captureDevice: AVCaptureDevice?
setupCamera(flash: 1)
}
}
var previewLayer: AVCaptureVideoPreviewLayer!
let bubbleLayer = BubbleLayer(string: "")
let queue = DispatchQueue(label: "videoQueue")
var captureSession = AVCaptureSession()
var captureDevice: AVCaptureDevice?
let videoOutput = AVCaptureVideoDataOutput()
var unknownCounter = 0 // used to track how many unclassified images in a row
let confidence: Float = 0.8
// MARK: Load the Model
let targetImageSize = CGSize(width: 227, height: 227) // must match model data input
lazy var classificationRequest: [VNRequest] = {
do {
// Load the Custom Vision model.
// To add a new model, drag it to the Xcode project browser making sure that the "Target Membership" is checked.
// Then update the following line with the name of your new model.
// let model = try VNCoreMLModel(for: Fruit().model)
let model = try VNCoreMLModel(for: CodigocubeAI().model)
let classificationRequest = VNCoreMLRequest(model: model, completionHandler: self.handleClassification)
return [ classificationRequest ]
} catch {
fatalError("Can't load Vision ML model: \(error)")
}
}()
// MARK: Handle image classification results
func handleClassification(request: VNRequest, error: Error?) {
guard let observations = request.results as? [VNClassificationObservation]
else { fatalError("unexpected result type from VNCoreMLRequest") }
guard let best = observations.first else {
fatalError("classification didn't return any results")
}
// Use results to update user interface (includes basic filtering)
print("\(best.identifier): \(best.confidence)")
if best.identifier.starts(with: "Unknown") || best.confidence < confidence {
if self.unknownCounter < 3 { // a bit of a low-pass filter to avoid flickering
self.unknownCounter += 1
} else {
self.unknownCounter = 0
DispatchQueue.main.async {
self.bubbleLayer.string = nil
}
}
} else {
self.unknownCounter = 0
DispatchQueue.main.async {[weak self] in
guard let strongSelf = self
else
{
return
}
// Trimming labels because they sometimes have unexpected line endings which show up in the GUI
let identifierString = best.identifier.trimmingCharacters(in: CharacterSet.whitespacesAndNewlines)
strongSelf.bubbleLayer.string = identifierString
let state : State = strongSelf.getState(identifierStr: identifierString)
strongSelf.stopActiveSession()
strongSelf.navigationController?.popViewController(animated: true)
strongSelf.callBackImageDetection(state)
}
}
}
func getState(identifierStr:String)->State
{
var state :State = .none
if identifierStr == "entertainment"
{
state = .entertainment
}
else if identifierStr == "geography"
{
state = .geography
}
else if identifierStr == "history"
{
state = .history
}
else if identifierStr == "knowledge"
{
state = .education
}
else if identifierStr == "science"
{
state = .science
}
else if identifierStr == "sports"
{
state = .sports
}
else
{
state = .none
}
return state
}
// MARK: Lifecycle
override func viewDidLoad() {
super.viewDidLoad()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewView.layer.addSublayer(previewLayer)
}
override func viewDidAppear(_ animated: Bool) {
self.edgesForExtendedLayout = UIRectEdge.init(rawValue: 0)
bubbleLayer.opacity = 0.0
bubbleLayer.position.x = self.view.frame.width / 2.0
bubbleLayer.position.y = lowerView.frame.height / 2
lowerView.layer.addSublayer(bubbleLayer)
setupCamera(flash:2)
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
previewLayer.frame = previewView.bounds;
}
// MARK: Camera handling
func setupCamera(flash :Int) {
let deviceDiscovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back)
if let device = deviceDiscovery.devices.last {
if(flash == 1)
{
if (device.hasTorch) {
do {
try device.lockForConfiguration()
if (device.isTorchAvailable) {
do {
try device.setTorchModeOn(level:0.2 )
}
catch
{
print(error)
}
device.unlockForConfiguration()
}
}
catch
{
print(error)
}
}
}
captureDevice = device
beginSession()
}
}
func beginSession() {
do {
videoOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String) : (NSNumber(value: kCVPixelFormatType_32BGRA) as! UInt32)]
videoOutput.alwaysDiscardsLateVideoFrames = true
videoOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.sessionPreset = .hd1920x1080
captureSession.addOutput(videoOutput)
let input = try AVCaptureDeviceInput(device: captureDevice!)
captureSession.addInput(input)
captureSession.startRunning()
} catch {
print("error connecting to capture device")
}
}
func stopActiveSession()
{
if captureSession.isRunning == true
{
captureSession.stopRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
self.stopActiveSession()
}
deinit {
print("deinit called")
}
}
// MARK: Video Data Delegate
extension ImageDetectionViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
// called for each frame of video
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let currentDate = NSDate.timeIntervalSinceReferenceDate
// control the pace of the machine vision to protect battery life
if currentDate - lastAnalysis >= pace {
lastAnalysis = currentDate
} else {
return // don't run the classifier more often than we need
}
// keep track of performance and log the frame rate
if trackPerformance {
frameCount = frameCount + 1
if frameCount % framesPerSample == 0 {
let diff = currentDate - startDate
if (diff > 0) {
if pace > 0.0 {
print("WARNING: Frame rate of image classification is being limited by \"pace\" setting. Set to 0.0 for fastest possible rate.")
}
print("\(String.localizedStringWithFormat("%0.2f", (diff/Double(framesPerSample))))s per frame (average)")
}
startDate = currentDate
}
}
// Crop and resize the image data.
// Note, this uses a Core Image pipeline that could be appended with other pre-processing.
// If we don't want to do anything custom, we can remove this step and let the Vision framework handle
// crop and resize as long as we are careful to pass the orientation properly.
guard let croppedBuffer = croppedSampleBuffer(sampleBuffer, targetSize: targetImageSize) else {
return
}
do {
let classifierRequestHandler = VNImageRequestHandler(cvPixelBuffer: croppedBuffer, options: [:])
try classifierRequestHandler.perform(classificationRequest)
} catch {
print(error)
}
}
}
let context = CIContext()
var rotateTransform: CGAffineTransform?
var scaleTransform: CGAffineTransform?
var cropTransform: CGAffineTransform?
var resultBuffer: CVPixelBuffer?
func croppedSampleBuffer(_ sampleBuffer: CMSampleBuffer, targetSize: CGSize) -> CVPixelBuffer? {
guard let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
fatalError("Can't convert to CVImageBuffer.")
}
// Only doing these calculations once for efficiency.
// If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
if rotateTransform == nil {
let imageSize = CVImageBufferGetEncodedSize(imageBuffer)
let rotatedSize = CGSize(width: imageSize.height, height: imageSize.width)
guard targetSize.width < rotatedSize.width, targetSize.height < rotatedSize.height else {
fatalError("Captured image is smaller than image size for model.")
}
let shorterSize = (rotatedSize.width < rotatedSize.height) ? rotatedSize.width : rotatedSize.height
rotateTransform = CGAffineTransform(translationX: imageSize.width / 2.0, y: imageSize.height / 2.0).rotated(by: -CGFloat.pi / 2.0).translatedBy(x: -imageSize.height / 2.0, y: -imageSize.width / 2.0)
let scale = targetSize.width / shorterSize
scaleTransform = CGAffineTransform(scaleX: scale, y: scale)
// Crop input image to output size
let xDiff = rotatedSize.width * scale - targetSize.width
let yDiff = rotatedSize.height * scale - targetSize.height
cropTransform = CGAffineTransform(translationX: xDiff/2.0, y: yDiff/2.0)
}
// Convert to CIImage because it is easier to manipulate
let ciImage = CIImage(cvImageBuffer: imageBuffer)
let rotated = ciImage.transformed(by: rotateTransform!)
let scaled = rotated.transformed(by: scaleTransform!)
let cropped = scaled.transformed(by: cropTransform!)
// Note that the above pipeline could be easily appended with other image manipulations.
// For example, to change the image contrast. It would be most efficient to handle all of
// the image manipulation in a single Core Image pipeline because it can be hardware optimized.
// Only need to create this buffer one time and then we can reuse it for every frame
if resultBuffer == nil {
let result = CVPixelBufferCreate(kCFAllocatorDefault, Int(targetSize.width), Int(targetSize.height), kCVPixelFormatType_32BGRA, nil, &resultBuffer)
guard result == kCVReturnSuccess else {
fatalError("Can't allocate pixel buffer.")
}
}
// Render the Core Image pipeline to the buffer
context.render(cropped, to: resultBuffer!)
// For debugging
// let image = imageBufferToUIImage(resultBuffer!)
// print(image.size) // set breakpoint to see image being provided to CoreML
return resultBuffer
}
// Only used for debugging.
// Turns an image buffer into a UIImage that is easier to display in the UI or debugger.
func imageBufferToUIImage(_ imageBuffer: CVImageBuffer) -> UIImage {
CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
let width = CVPixelBufferGetWidth(imageBuffer)
let height = CVPixelBufferGetHeight(imageBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
let quartzImage = context!.makeImage()
CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
let image = UIImage(cgImage: quartzImage!, scale: 1.0, orientation: .right)
return image
}
I am getting the error 'An AVCaptureOutput instance may not be added to more than one session'.
Now I want to give the user the ability to toggle the flash. How do I destroy the active camera session and open a new one with the flash on?
Can anyone help me, or suggest any other way to achieve this?
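The torch can usually be toggled on the existing capture device without tearing the session down, which also avoids adding the AVCaptureVideoDataOutput to a second session. A minimal sketch, assuming the captureDevice property from the code above:
// Sketch: toggle the torch in place via lockForConfiguration; the running
// AVCaptureSession and its output are left untouched.
func setTorch(on: Bool) {
    guard let device = captureDevice, device.hasTorch else { return }
    do {
        try device.lockForConfiguration()
        if on && device.isTorchAvailable {
            try device.setTorchModeOn(level: AVCaptureDevice.maxAvailableTorchLevel)
        } else {
            device.torchMode = .off
        }
        device.unlockForConfiguration()
    } catch {
        print(error)
    }
}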

How to generate random image using image view in swift

I just followed a Treehouse course and created my first Fun Fact app. In it, they generate random quotes from an array.
Needed:
I have placed an image view using the storyboard. Pressing a button already generates a random quote from the array, but I need a random image to be shown when that same button is pressed. I am new to Swift!
This is factbook.swift
struct FactBook {
// stored in an array to show all quotes
let factsArray = [
"You have to dream before your dreams can come true.",
"To succeed in your mission, you must have single-minded devotion to your goal.",
"You have to dream before your dreams can come true.",
"Love your job but don’t love your company, because you may not know when your company stops loving you.",
"Failure will never overtake me if my definition to succeed is strong enough.",
]
//make a random quote
func randomFact() -> String {
//
// let unsignedRandomNumber = arc4random_uniform(unsignedArrayCount)
//
let unsignedArrayCount = UInt32(factsArray.count)
let unsignedRandomNumber = arc4random_uniform(unsignedArrayCount)
let randomNumber = Int(unsignedRandomNumber)
//
// let unsignedRandomNumber = arc4random_uniform(unsignedArrayCount)
// let randomNumber = Int(signedRandomNumber)
return factsArray[randomNumber]
}
}
This is viewcontroller.swift
class ViewController: UIViewController {
@IBOutlet weak var funFactLabel: UILabel!
@IBOutlet weak var funFactButton: UIButton!
@IBOutlet weak var imgV: UIImageView!
let factBook = FactBook()
let colorWheel = ColorWheel()
//method to define
// let yourImage = UIImage(named: "apj")
// let imageview = UIImageView(image: yourImage)
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
funFactLabel.text = factBook.randomFact()
self.view.backgroundColor = UIColor(patternImage: UIImage(named: "apj")!)
// let yourImage = UIImage(named: "apj")
// let imageview = UIImageView(image: yourImage)
// self.view.addSubview(imageview)
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
@IBAction func showFunFact() {
let randomColor = colorWheel.randomColor()
view.backgroundColor = randomColor
funFactButton.tintColor = randomColor
//funFactButton.tintColor = clearcolor
funFactLabel.text = factBook.randomFact()
}
}
The solution is mainly to use the same approach you took for the random text. To sum up, you should have an array of the images and a function that selects a random image, then call that function from your view controller. A possible implementation of this approach is:
Add this array to your FactBook
let factsImagesArray = [
"image1.png",
"image2.png",
"image3.png",
"image4.png",
"image5.png",
]
Add this method to your FactBook
func randomFactImage() -> UIImage {
let unsignedArrayCount = UInt32(factsImagesArray.count)
let unsignedRandomNumber = arc4random_uniform(unsignedArrayCount)
let randomNumber = Int(unsignedRandomNumber)
return UIImage(named: factsImagesArray[randomNumber])!
}
and in your viewcontroller change showFunFact to:
@IBAction func showFunFact() {
let randomColor = colorWheel.randomColor()
view.backgroundColor = randomColor
funFactButton.tintColor = randomColor
funFactLabel.text = factBook.randomFact()
imgV.image = factBook.randomFactImage()
}
Of course, you should have image1.png, image2.png, ... in your resources.
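On Swift 4.2 and later, the same idea can be written with randomElement(); a minimal alternative sketch for FactBook that returns an optional instead of force-unwrapping:
// Sketch: pick a random name from factsImagesArray and resolve it to a UIImage.
// Returns nil if the array is empty or the asset is missing.
func randomFactImage() -> UIImage? {
    guard let name = factsImagesArray.randomElement() else { return nil }
    return UIImage(named: name)
}
Since UIImageView.image is optional, imgV.image = factBook.randomFactImage() works unchanged with this version.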
@IBAction func randomimage(sender: AnyObject)
{
//list of Images in array
let image : NSArray = [ UIImage(named: "1.jpg")!,
UIImage(named: "2.jpg")!,
UIImage(named: "3.jpg")!,
UIImage(named: "4.jpg")!,
UIImage(named: "5.jpg")!,
UIImage(named: "6.jpg")!,
UIImage(named: "7.jpg")!]
//random image generating method
let imagerange: UInt32 = UInt32(image.count)
let randomimage = Int(arc4random_uniform(imagerange))
let generatedimage: AnyObject = image.objectAtIndex(randomimage)
self.myimage.image = generatedimage as? UIImage
}
