Comparing two images and finding the percent difference - iOS

I've been trying to make an animal recognition app that works by image. My method is to compare the selected image against the other images in an image array and list any comparisons that yield a 90%+ similarity. Are there any other ways to compare two images that are similar, yet not identical? Any suggestions would be appreciated. These calculations must also run for many iterations, so a method that isn't time-consuming would be much appreciated.
Please try to provide some code with the answer, as I am not very experienced in Swift.
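One well-known, fast approach (a suggestion of mine, not taken from the answer below) is a perceptual "difference hash" (dHash): scale each image down to a tiny grayscale thumbnail, hash the brightness gradients into 64 bits, and compare hashes by Hamming distance. A minimal sketch, assuming UIKit images as input; all the names here are illustrative:
import UIKit

extension UIImage {
    // Scale to a 9x8 grayscale thumbnail and hash the left-to-right brightness gradients.
    func differenceHash() -> UInt64? {
        guard let cgImage = self.cgImage else { return nil }
        let width = 9, height = 8
        var pixels = [UInt8](repeating: 0, count: width * height)
        let drawn = pixels.withUnsafeMutableBytes { buffer -> Bool in
            guard let context = CGContext(data: buffer.baseAddress,
                                          width: width, height: height,
                                          bitsPerComponent: 8, bytesPerRow: width,
                                          space: CGColorSpaceCreateDeviceGray(),
                                          bitmapInfo: CGImageAlphaInfo.none.rawValue) else { return false }
            context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
            return true
        }
        guard drawn else { return nil }
        var hash: UInt64 = 0
        for row in 0..<height {
            for col in 0..<(width - 1) {
                hash <<= 1
                // Set the bit when brightness increases left-to-right.
                if pixels[row * width + col] < pixels[row * width + col + 1] {
                    hash |= 1
                }
            }
        }
        return hash
    }
}

// Percent similarity from the Hamming distance between two 64-bit hashes.
func similarity(_ a: UInt64, _ b: UInt64) -> Double {
    return (1.0 - Double((a ^ b).nonzeroBitCount) / 64.0) * 100.0
}
Hashing each image once and then comparing 64-bit integers is far cheaper than pixel-by-pixel comparison across many iterations, so the 90% check becomes, e.g., similarity(h1, h2) >= 90.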

You can do something like I did: analyze each frame against the next and threshold the difference (I didn't scale the image down yet, and probably should).
// Called every time a frame is captured.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let imageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    CVPixelBufferLockBaseAddress(imageBufferRef, [])
    // Set up for comparisons between the previous and current frames.
    if prevPB == nil {
        prevPB = imageBufferRef
    }
    // If motion is detected, play the video.
    if !isPlaying && motionIsDetected(prevPB!, imageBufferRef) == 1 {
        isPlaying = true
        print("play video")
    }
    CVPixelBufferUnlockBaseAddress(imageBufferRef, [])
    prevPB = imageBufferRef
}
// Read the (r, g, b) values of a single pixel from a BGRA pixel buffer.
func pixelFrom(x: Int, y: Int, current: CVPixelBuffer) -> (UInt8, UInt8, UInt8) {
    let baseAddress = CVPixelBufferGetBaseAddress(current)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(current)
    let buffer = baseAddress!.assumingMemoryBound(to: UInt8.self)
    // x is the row, y is the column; each BGRA pixel is 4 bytes wide.
    let index = x * bytesPerRow + y * 4
    let b = buffer[index]
    let g = buffer[index + 1]
    let r = buffer[index + 2]
    return (r, g, b)
}
func motionIsDetected(_ prev: CVPixelBuffer, _ current: CVPixelBuffer) -> Int {
    var differences = 0
    let width = CVPixelBufferGetWidth(current)
    let height = CVPixelBufferGetHeight(current)
    // THRESHOLDING: a pixel counts as changed when each of r, g and b differs
    // by more than MAGIC_THRESHOLD; motion needs at least ENOUGH_DIFFERENCES such pixels.
    let MAGIC_THRESHOLD = 120
    let ENOUGH_DIFFERENCES = 10
    for x in 0..<height {     // rows
        for y in 0..<width {  // cols
            let setA = pixelFrom(x: x, y: y, current: prev)
            let setB = pixelFrom(x: x, y: y, current: current)
            if abs(Int(setA.0) - Int(setB.0)) > MAGIC_THRESHOLD
                && abs(Int(setA.1) - Int(setB.1)) > MAGIC_THRESHOLD
                && abs(Int(setA.2) - Int(setB.2)) > MAGIC_THRESHOLD {
                differences += 1
            }
        }
    }
    print("differences: \(differences)")
    return differences > ENOUGH_DIFFERENCES ? 1 : 0
}
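The question also asks for a percent difference. Once you have the per-pixel differences count from the code above, the percentage falls out of the total pixel count; a minimal sketch (my addition, reusing the names from the answer):
// Hypothetical helper: turn the changed-pixel count into a percent difference.
func percentDifference(differences: Int, width: Int, height: Int) -> Double {
    return Double(differences) / Double(width * height) * 100.0
}

// Treat two frames as a match at 90%+ similarity, as in the question:
// let isMatch = percentDifference(differences: differences, width: width, height: height) <= 10.0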

Related

Depth map normalization

I am applying the CIDepthBlurEffect filter to a PHAsset as follows:
PHImageManager.default().requestImageDataAndOrientation(for: self.asset!, options: imageOptions) { (data, responseString, imageOrientation, info) in
    if data != nil {
        DispatchQueue.main.async {
            if let depthImage = CIImage(data: data!, options: [CIImageOption.auxiliaryDisparity: true])?.oriented(imageOrientation) {
                if let newMainImage = CIImage(data: data!)?.oriented(imageOrientation) {
                    let filter = CIFilter(name: "CIDepthBlurEffect",
                                          parameters: [kCIInputImageKey: newMainImage,
                                                       kCIInputDisparityImageKey: depthImage,
                                                       "inputAperture": withRadius])
                    self.imageView.image = UIImage(ciImage: filter!.outputImage!)
                }
            }
        }
    }
}
This works great most of the time. However, sometimes the filter's blur planes seem to be off, almost as if the Z plane is wrong or shifted.
Does this method require the depth data to be normalized?
Previously, when CVPixelBuffer was used, I was using this extension:
final func normalize() -> CVPixelBuffer {
    let width = CVPixelBufferGetWidth(self)
    let height = CVPixelBufferGetHeight(self)
    CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    let floatBuffer = unsafeBitCast(CVPixelBufferGetBaseAddress(self), to: UnsafeMutablePointer<Float>.self)
    // First pass: find the minimum and maximum disparity values.
    var minPixel: Float = 1.0
    var maxPixel: Float = 0.0
    for y in 0 ..< height {
        for x in 0 ..< width {
            let pixel = floatBuffer[y * width + x]
            minPixel = min(pixel, minPixel)
            maxPixel = max(pixel, maxPixel)
        }
    }
    // Second pass: rescale every value into the 0...1 range.
    let range = maxPixel - minPixel
    for y in 0 ..< height {
        for x in 0 ..< width {
            let pixel = floatBuffer[y * width + x]
            floatBuffer[y * width + x] = (pixel - minPixel) / range
        }
    }
    CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
    return self
}
Is there such a way to do this to a CIImage?
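One possible route (an assumption on my part, not a confirmed answer): render the CIImage into a CVPixelBuffer with CIContext, normalize that buffer with the extension above, and wrap the result back in a CIImage. A minimal sketch; normalized(_:context:) is a hypothetical helper:
import CoreImage

// Hypothetical bridge: normalize a disparity CIImage by round-tripping it
// through a CVPixelBuffer and the normalize() extension shown above.
func normalized(_ image: CIImage, context: CIContext) -> CIImage? {
    var buffer: CVPixelBuffer?
    // One 32-bit float channel, matching the disparity data.
    CVPixelBufferCreate(kCFAllocatorDefault,
                        Int(image.extent.width),
                        Int(image.extent.height),
                        kCVPixelFormatType_DisparityFloat32,
                        nil,
                        &buffer)
    guard let pixelBuffer = buffer else { return nil }
    context.render(image, to: pixelBuffer)
    return CIImage(cvPixelBuffer: pixelBuffer.normalize())
}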

AVCaptureVideoDataOutputSampleBufferDelegate drops frames when using CIFilters for video filtering

I have a very strange case where AVCaptureVideoDataOutputSampleBufferDelegate drops frames if I use 13 different filter chains. Let me explain:
I have a CameraController setup, nothing special. Here is my delegate method:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if !paused {
        if connection.output?.connection(with: .audio) == nil {
            // capture video
            // my try to avoid the "Out of buffers" error, no luck ;(
            lastCapturedBuffer = nil
            let err = CMSampleBufferCreateCopy(allocator: kCFAllocatorDefault, sampleBuffer: sampleBuffer, sampleBufferOut: &lastCapturedBuffer)
            if err == noErr {
            }
            connection.videoOrientation = .portrait
            // getting image
            let pixelBuffer = CMSampleBufferGetImageBuffer(lastCapturedBuffer!)
            CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
            // captured - is just a CIImage property
            captured = CIImage(cvPixelBuffer: pixelBuffer!)
            CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
            //CVPixelBufferUnlockBaseAddress(pixelBuffer!, .readOnly)
            // transform image to target resolution
            let srcWidth = CGFloat(captured.extent.width)
            let srcHeight = CGFloat(captured.extent.height)
            let dstWidth: CGFloat = ConstantsManager.shared.k_video_width
            let dstHeight: CGFloat = ConstantsManager.shared.k_video_height
            let scaleX = dstWidth / srcWidth
            let scaleY = dstHeight / srcHeight
            let transform = CGAffineTransform(scaleX: scaleX, y: scaleY)
            captured = captured.transformed(by: transform).cropped(to: CGRect(x: 0, y: 0, width: dstWidth, height: dstHeight))
            // mirror for front camera
            if front {
                var t = CGAffineTransform(scaleX: -1, y: 1)
                t = t.translatedBy(x: -ConstantsManager.shared.k_video_width, y: 0)
                captured = captured.transformed(by: t)
            }
            // video capture logic
            let writable = canWrite()
            if writable, sessionAtSourceTime == nil {
                sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(lastCapturedBuffer!)
                videoWriter.startSession(atSourceTime: sessionAtSourceTime!)
            }
            if writable, videoWriterInput.isReadyForMoreMediaData {
                videoWriterInput.append(lastCapturedBuffer!)
            }
            // apply effect in realtime <- here is the problem. If I comment out the next line, it is fixed, but the effect isn't applied
            captured = FilterManager.shared.applyFilterForCamera(inputImage: captured)
            // current frame in case the user wants to save the image as a photo
            self.capturedPhoto = captured
            // send frame to the Camcoder view controller
            self.delegate?.didCapturedFrame(frame: captured)
        } else {
            // capture sound
            let writable = canWrite()
            if writable, audioWriterInput.isReadyForMoreMediaData {
                //print("write audio buffer")
                audioWriterInput?.append(lastCapturedBuffer!)
            }
        }
    } else {
        // paused
    }
}
I also implemented the didDrop delegate method; here is how I figured out why it drops frames:
func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    print("did drop")
    var mode: CMAttachmentMode = 0
    let reason = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_DroppedFrameReason, attachmentModeOut: &mode)
    print("reason \(String(describing: reason))") // Optional(OutOfBuffers)
}
So I did it like a pro and just commented out parts of the code to find where the problem was. It is here:
captured = FilterManager.shared.applyFilterForCamera(inputImage: captured)
FilterManager is a singleton; here is the function that gets called:
func applyFilterForCamera(inputImage: CIImage) -> CIImage {
    return currentVsFilter!.apply(sourceImage: inputImage)
}
currentVsFilter is an object of the VSFilter type; here is an example of one:
import Foundation
import AVKit

class TestFilter: CustomFilter {
    let _name = "Тестовый Фильтр"
    let _displayName = "Test Filter"
    var tempImage: CIImage?
    var final: CGImage?

    override func name() -> String {
        return _name
    }

    override func displayName() -> String {
        return _displayName
    }

    override init() {
        super.init()
        print("Test Filter init")
        // set up my custom kernel filter
        self.noise.type = GlitchFilter.GlitchType.allCases[2]
    }

    // this returns a composition for playback using AVPlayer
    override func composition(asset: AVAsset) -> AVMutableVideoComposition {
        let composition = AVMutableVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
            let inputImage = request.sourceImage.cropped(to: request.sourceImage.extent)
            DispatchQueue.global(qos: .userInitiated).async {
                let output = self.apply(sourceImage: inputImage, forComposition: true)
                request.finish(with: output, context: nil)
            }
        })
        let size = FilterManager.shared.cropRectForOrientation().size
        composition.renderSize = size
        return composition
    }

    // this returns the actual filtered CIImage, used for both the AVPlayer composition and the realtime camera
    override func apply(sourceImage: CIImage, forComposition: Bool = false) -> CIImage {
        // rendered text
        tempImage = FilterManager.shared.textRenderedImage()
        // some filters chained one by one
        self.screenBlend?.setValue(tempImage, forKey: kCIInputImageKey)
        self.screenBlend?.setValue(sourceImage, forKey: kCIInputBackgroundImageKey)
        self.noise.inputImage = self.screenBlend?.outputImage
        self.noise.inputAmount = CGFloat.random(in: 1.0...3.0)
        // result
        tempImage = self.noise.outputImage
        // correct crop
        let rect = forComposition ? FilterManager.shared.cropRectForOrientation() : FilterManager.shared.cropRect
        final = self.context.createCGImage(tempImage!, from: rect!)
        return CIImage(cgImage: final!)
    }
}
And now, the strangest thing: I have 30 VSFilters, and when I get to the 13th (switching one by one via a UIButton) I get the "Out of Buffers" error, this one:
kCMSampleBufferDroppedFrameReason_OutOfBuffers
What I tested:
I changed the vsFilters order in the filters array inside the FilterManager singleton - same result
I tried switching from the first to the 12th one by one, then going back - that works, but after I switched to the 13th (of 30, counting from 0) - bug
It looks like it can handle only 12 VSFilter objects, as if it retains them somehow, or maybe it's related to threading, I don't know.
This app is made for iOS devices, tested on an iPhone X with iOS 13.3.1.
This is a video editor app for applying different effects to both the live stream from the camera and video files from the camera roll.
Maybe someone has experience with this?
Have a great day
Best, Victor
Edit 1. If I reinit the cameraController (AVCaptureSession, input/output devices) it works, but this is an ugly option and it adds lag when switching filters
OK, so I finally won this battle. In case someone else gets this "OutOfBuffers" problem, here is my solution.
As I figured out, CIFilter grabs the CVPixelBuffer and doesn't release it while filtering images. It kind of creates one huge buffer, I guess. Strange thing: it doesn't create a memory leak, so I guess it grabs not the particular buffer but creates a strong reference to it. As the rumors (me) say, it can handle only 12 such references.
So, my approach was to copy the CVPixelBuffer and then work with the copy instead of the buffer I got from the AVCaptureVideoDataOutputSampleBufferDelegate didOutput func.
Here is my new code:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if !paused {
        //print("camera controller \(id) got frame")
        if connection.output?.connection(with: .audio) == nil {
            // capture video
            connection.videoOrientation = .portrait
            // getting image
            guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            // this works!
            let copyBuffer = pixelBuffer.copy()
            // captured - is just a CIImage property
            captured = CIImage(cvPixelBuffer: copyBuffer)
            // transform image to target resolution
            let srcWidth = CGFloat(captured.extent.width)
            let srcHeight = CGFloat(captured.extent.height)
            let dstWidth: CGFloat = ConstantsManager.shared.k_video_width
            let dstHeight: CGFloat = ConstantsManager.shared.k_video_height
            let scaleX = dstWidth / srcWidth
            let scaleY = dstHeight / srcHeight
            let transform = CGAffineTransform(scaleX: scaleX, y: scaleY)
            captured = captured.transformed(by: transform).cropped(to: CGRect(x: 0, y: 0, width: dstWidth, height: dstHeight))
            // mirror for front camera
            if front {
                var t = CGAffineTransform(scaleX: -1, y: 1)
                t = t.translatedBy(x: -ConstantsManager.shared.k_video_width, y: 0)
                captured = captured.transformed(by: t)
            }
            // video capture logic
            let writable = canWrite()
            if writable, sessionAtSourceTime == nil {
                sessionAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
                videoWriter.startSession(atSourceTime: sessionAtSourceTime!)
            }
            if writable, videoWriterInput.isReadyForMoreMediaData {
                videoWriterInput.append(sampleBuffer)
            }
            self.captured = FilterManager.shared.applyFilterForCamera(inputImage: self.captured)
            // current frame in case the user wants to save the image as a photo
            self.capturedPhoto = captured
            // send frame to the Camcoder view controller
            self.delegate?.didCapturedFrame(frame: captured)
        } else {
            // capture sound
            let writable = canWrite()
            if writable, audioWriterInput.isReadyForMoreMediaData {
                //print("write audio buffer")
                audioWriterInput?.append(sampleBuffer)
            }
        }
    } else {
        // paused
        //print("paused camera controller \(id)")
    }
}
And here is the func to copy the buffer:
func copy() -> CVPixelBuffer {
    precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
    var _copy: CVPixelBuffer?
    CVPixelBufferCreate(
        kCFAllocatorDefault,
        CVPixelBufferGetWidth(self),
        CVPixelBufferGetHeight(self),
        CVPixelBufferGetPixelFormatType(self),
        nil,
        &_copy)
    guard let copy = _copy else { fatalError() }
    CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
    CVPixelBufferLockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
    let copyBaseAddress = CVPixelBufferGetBaseAddress(copy)
    let currBaseAddress = CVPixelBufferGetBaseAddress(self)
    print("copy data size: \(CVPixelBufferGetDataSize(copy))")
    print("self data size: \(CVPixelBufferGetDataSize(self))")
    memcpy(copyBaseAddress, currBaseAddress, CVPixelBufferGetDataSize(copy))
    //memcpy(copyBaseAddress, currBaseAddress, CVPixelBufferGetDataSize(self) * 2)
    CVPixelBufferUnlockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
    CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
    return copy
}
I used it as an extension.
I hope this helps anyone with a similar problem.
Best, Victor

Copy a CVPixelBuffer on any iOS device

I'm having a great deal of difficulty coming up with code that reliably copies a CVPixelBuffer on any iOS device. My first attempt worked fine until I tried it on an iPad Pro:
extension CVPixelBuffer {
    func deepcopy() -> CVPixelBuffer? {
        let width = CVPixelBufferGetWidth(self)
        let height = CVPixelBufferGetHeight(self)
        let format = CVPixelBufferGetPixelFormatType(self)
        var pixelBufferCopyOptional: CVPixelBuffer?
        CVPixelBufferCreate(nil, width, height, format, nil, &pixelBufferCopyOptional)
        if let pixelBufferCopy = pixelBufferCopyOptional {
            CVPixelBufferLockBaseAddress(self, .readOnly)
            CVPixelBufferLockBaseAddress(pixelBufferCopy, [])
            let baseAddress = CVPixelBufferGetBaseAddress(self)
            let dataSize = CVPixelBufferGetDataSize(self)
            let target = CVPixelBufferGetBaseAddress(pixelBufferCopy)
            memcpy(target, baseAddress, dataSize)
            CVPixelBufferUnlockBaseAddress(pixelBufferCopy, [])
            CVPixelBufferUnlockBaseAddress(self, .readOnly)
        }
        return pixelBufferCopyOptional
    }
}
The above crashes on an iPad Pro because CVPixelBufferGetDataSize(self) is slightly larger than CVPixelBufferGetDataSize(pixelBufferCopy), so the memcpy writes to unallocated memory.
So I gave up with that and tried this:
func copy() -> CVPixelBuffer?
{
    precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
    var _copy: CVPixelBuffer?
    CVPixelBufferCreate(
        nil,
        CVPixelBufferGetWidth(self),
        CVPixelBufferGetHeight(self),
        CVPixelBufferGetPixelFormatType(self),
        CVBufferGetAttachments(self, .shouldPropagate),
        &_copy)
    guard let copy = _copy else { return nil }
    CVPixelBufferLockBaseAddress(self, .readOnly)
    CVPixelBufferLockBaseAddress(copy, [])
    defer
    {
        CVPixelBufferUnlockBaseAddress(copy, [])
        CVPixelBufferUnlockBaseAddress(self, .readOnly)
    }
    for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
    {
        let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
        let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
        let height = CVPixelBufferGetHeightOfPlane(self, plane)
        let bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
        memcpy(dest, source, height * bytesPerRow)
    }
    return copy
}
That works on both my test devices, but it has just reached actual customers and it turns out it crashes on the iPad 6 (and only that device so far). It's an EXC_BAD_ACCESS on the call to memcpy() again.
It seems crazy that there isn't a simple API call for this, given how hard it seems to be to make it work reliably. Or am I making it harder than it needs to be? Thanks for any advice!
This question and answer combo is solid gold. Let me add value with a slight refactor and some control flow to account for CVPixelBuffers that do not have planes.
// Error type thrown below when buffer allocation fails (defined here so the
// snippet compiles on its own).
public enum PixelBufferCopyError: Error {
    case allocationFailed
}

public extension CVPixelBuffer {
    func copy() throws -> CVPixelBuffer {
        precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
        var _copy: CVPixelBuffer?
        let width = CVPixelBufferGetWidth(self)
        let height = CVPixelBufferGetHeight(self)
        let formatType = CVPixelBufferGetPixelFormatType(self)
        let attachments = CVBufferGetAttachments(self, .shouldPropagate)
        CVPixelBufferCreate(nil, width, height, formatType, attachments, &_copy)
        guard let copy = _copy else {
            throw PixelBufferCopyError.allocationFailed
        }
        CVPixelBufferLockBaseAddress(self, .readOnly)
        CVPixelBufferLockBaseAddress(copy, [])
        defer {
            CVPixelBufferUnlockBaseAddress(copy, [])
            CVPixelBufferUnlockBaseAddress(self, .readOnly)
        }
        let pixelBufferPlaneCount: Int = CVPixelBufferGetPlaneCount(self)
        if pixelBufferPlaneCount == 0 {
            let dest = CVPixelBufferGetBaseAddress(copy)
            let source = CVPixelBufferGetBaseAddress(self)
            let height = CVPixelBufferGetHeight(self)
            let bytesPerRowSrc = CVPixelBufferGetBytesPerRow(self)
            let bytesPerRowDest = CVPixelBufferGetBytesPerRow(copy)
            if bytesPerRowSrc == bytesPerRowDest {
                memcpy(dest, source, height * bytesPerRowSrc)
            } else {
                var startOfRowSrc = source
                var startOfRowDest = dest
                for _ in 0..<height {
                    memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
                    startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
                    startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
                }
            }
        } else {
            for plane in 0 ..< pixelBufferPlaneCount {
                let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
                let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
                let height = CVPixelBufferGetHeightOfPlane(self, plane)
                let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
                let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
                if bytesPerRowSrc == bytesPerRowDest {
                    memcpy(dest, source, height * bytesPerRowSrc)
                } else {
                    var startOfRowSrc = source
                    var startOfRowDest = dest
                    for _ in 0..<height {
                        memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
                        startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
                        startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
                    }
                }
            }
        }
        return copy
    }
}
Valid with Swift 5. To provide a little more background... there are many formats that the AVCaptureVideoDataOutput .videoSettings property can take, and not all of them have planes, especially the ones ML models might need.
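For example, one common non-planar choice (a sketch of standard AVFoundation setup, not code from the answer; videoOutput is a stand-in for your configured output):
import AVFoundation

// Ask the data output for packed 32-bit BGRA frames; these have no planes,
// so the pixelBufferPlaneCount == 0 branch above is the one that handles them.
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.videoSettings = [
    kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]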
The second implementation looks quite solid. The only problem I can imagine is that a plane in the new pixel buffer is allocated with a different stride length (bytes per row). The stride length is based on width × (bytes per pixel) and then rounded up in an unspecified way to achieve optimal memory access.
So check if:
CVPixelBufferGetBytesPerRowOfPlane(self, plane) == CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
If not, copy the pixel plane row by row:
for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
{
    let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
    let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
    let height = CVPixelBufferGetHeightOfPlane(self, plane)
    let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
    let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
    if bytesPerRowSrc == bytesPerRowDest {
        memcpy(dest, source, height * bytesPerRowSrc)
    } else {
        var startOfRowSrc = source
        var startOfRowDest = dest
        for _ in 0..<height {
            memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
            startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
            startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
        }
    }
}

Reduce memory consumption while loading gif images in UIImageView

I want to show a gif image in a UIImageView, and with the code below (source: https://iosdevcenters.blogspot.com/2016/08/load-gif-image-in-swift_22.html, *I did not understand all of the code), I am able to display gif images. However, the memory consumption seems high (tested on a real device). Is there any way to modify the code below to reduce the memory consumption?
@IBOutlet weak var imageView: UIImageView!

override func viewDidLoad() {
    super.viewDidLoad()
    let url = "https://cdn-images-1.medium.com/max/800/1*oDqXedYUMyhWzN48pUjHyw.gif"
    let gifImage = UIImage.gifImageWithURL(url)
    imageView.image = gifImage
}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}
// Swift-migration helper: compares two optionals, treating nil as the smaller value.
fileprivate func < <T: Comparable>(lhs: T?, rhs: T?) -> Bool {
    switch (lhs, rhs) {
    case let (l?, r?):
        return l < r
    case (nil, _?):
        return true
    default:
        return false
    }
}
extension UIImage {
    public class func gifImageWithData(_ data: Data) -> UIImage? {
        guard let source = CGImageSourceCreateWithData(data as CFData, nil) else {
            print("image doesn't exist")
            return nil
        }
        return UIImage.animatedImageWithSource(source)
    }

    public class func gifImageWithURL(_ gifUrl: String) -> UIImage? {
        guard let bundleURL = URL(string: gifUrl) else {
            return nil
        }
        guard let imageData = try? Data(contentsOf: bundleURL) else {
            return nil
        }
        return gifImageWithData(imageData)
    }

    public class func gifImageWithName(_ name: String) -> UIImage? {
        guard let bundleURL = Bundle.main
            .url(forResource: name, withExtension: "gif") else {
            return nil
        }
        guard let imageData = try? Data(contentsOf: bundleURL) else {
            return nil
        }
        return gifImageWithData(imageData)
    }

    class func delayForImageAtIndex(_ index: Int, source: CGImageSource!) -> Double {
        var delay = 0.1
        let cfProperties = CGImageSourceCopyPropertiesAtIndex(source, index, nil)
        let gifProperties: CFDictionary = unsafeBitCast(
            CFDictionaryGetValue(cfProperties,
                Unmanaged.passUnretained(kCGImagePropertyGIFDictionary).toOpaque()),
            to: CFDictionary.self)
        var delayObject: AnyObject = unsafeBitCast(
            CFDictionaryGetValue(gifProperties,
                Unmanaged.passUnretained(kCGImagePropertyGIFUnclampedDelayTime).toOpaque()),
            to: AnyObject.self)
        if delayObject.doubleValue == 0 {
            delayObject = unsafeBitCast(CFDictionaryGetValue(gifProperties,
                Unmanaged.passUnretained(kCGImagePropertyGIFDelayTime).toOpaque()), to: AnyObject.self)
        }
        delay = delayObject as! Double
        if delay < 0.1 {
            delay = 0.1
        }
        return delay
    }

    class func gcdForPair(_ a: Int?, _ b: Int?) -> Int {
        var a = a
        var b = b
        if b == nil || a == nil {
            if b != nil {
                return b!
            } else if a != nil {
                return a!
            } else {
                return 0
            }
        }
        if a < b {
            let c = a
            a = b
            b = c
        }
        var rest: Int
        while true {
            rest = a! % b!
            if rest == 0 {
                return b!
            } else {
                a = b
                b = rest
            }
        }
    }

    class func gcdForArray(_ array: Array<Int>) -> Int {
        if array.isEmpty {
            return 1
        }
        var gcd = array[0]
        for val in array {
            gcd = UIImage.gcdForPair(val, gcd)
        }
        return gcd
    }

    class func animatedImageWithSource(_ source: CGImageSource) -> UIImage? {
        let count = CGImageSourceGetCount(source)
        var images = [CGImage]()
        var delays = [Int]()
        for i in 0..<count {
            if let image = CGImageSourceCreateImageAtIndex(source, i, nil) {
                images.append(image)
            }
            let delaySeconds = UIImage.delayForImageAtIndex(Int(i),
                                                            source: source)
            delays.append(Int(delaySeconds * 1000.0)) // Seconds to ms
        }
        let duration: Int = {
            var sum = 0
            for val: Int in delays {
                sum += val
            }
            return sum
        }()
        let gcd = gcdForArray(delays)
        var frames = [UIImage]()
        var frame: UIImage
        var frameCount: Int
        for i in 0..<count {
            frame = UIImage(cgImage: images[Int(i)])
            frameCount = Int(delays[Int(i)] / gcd)
            for _ in 0..<frameCount {
                frames.append(frame)
            }
        }
        let animation = UIImage.animatedImage(with: frames,
                                              duration: Double(duration) / 1000.0)
        return animation
    }
}
When I render the image as a normal png image, the consumption is around 10MB.
The GIF in question has a resolution of 480×288 and contains 10 frames.
Considering that UIImageView stores frames as 4-byte RGBA, this GIF occupies 4 × 10 × 480 × 288 = 5,529,600 bytes in RAM, which is more than 5 megabytes.
There are numerous ways to mitigate that, but only one of them puts no additional strain on the CPU; the others are mere CPU-to-RAM trade-offs.
The method I'm talking about is subclassing UIImageView and loading your GIFs by hand, preserving their internal representation (indexed image + palette). That would allow you to cut the memory usage fourfold.
N.B.: even though GIFs may store a full image for each frame (which is the case for the GIF in question), many do not. On the contrary, most frames may contain only the pixels that changed since the previous one, so in general the internal GIF representation only allows displaying frames in order.
Other methods of saving RAM include e.g. re-reading every frame from disk prior to displaying it, which is certainly not good for battery life.
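To make that arithmetic concrete, here is the same estimate as a tiny sketch (my addition; the 4 bytes/pixel figure assumes the RGBA backing described above):
// Estimated RAM for an animated image decoded into 4-byte RGBA frames.
func decodedGifFootprint(frames: Int, width: Int, height: Int) -> Int {
    return 4 * frames * width * height
}

// The GIF from the question: 10 frames at 480 × 288
// decodedGifFootprint(frames: 10, width: 480, height: 288) == 5_529_600 bytes (~5.3 MB)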
To display GIFs with less memory consumption, try BBWebImage.
BBWebImage decides how many image frames are decoded and cached depending on current memory usage. If free memory is not enough, only some of the image frames are decoded and cached.
For Swift 4:
// BBAnimatedImageView (a UIImageView subclass) displays the animated image
imageView = BBAnimatedImageView(frame: frame)
// Load and display the gif
imageView.bb_setImage(with: url,
                      placeholder: UIImage(named: "placeholder"))
{ (image: UIImage?, data: Data?, error: Error?, cacheType: BBImageCacheType) in
    // Do something when loading finishes
}

I want multiple non-random numbers in Swift

I am using Swift and want to have a number of repeatable random-number patterns throughout my game.
Ideally I would have some sort of shared class that worked sort of like this (this is sort of pseudo-Swift code):
class RandomNumberUtility {
    static var sharedInstance = RandomNumberUtility()
    var random1 : Random()
    var random2 : Random()
    func seedRandom1(seed : Int){
        random1 = Random(seed)
    }
    func seedRandom2(seed : Int){
        random2 = Random(seed)
    }
    func getRandom1() -> Int {
        return random1.next(1,10)
    }
    func getRandom2() -> Int {
        return random2.next(1,100)
    }
}
Then, to begin the series, anywhere in my program I could go like this:
RandomNumberUtility.sharedInstance.seedRandom1(seed: 7)
RandomNumberUtility.sharedInstance.seedRandom2(seed: 12)
And then I would know that (for example) the first 4 times I called
RandomNumberUtility.sharedInstance.getRandom1()
I would always get the same values (for example: 6, 1, 2, 6)
This would continue until at some point I seeded the number again, and then I would either get the exact same series back (if I used the same seed), or a different series (if I used a different seed).
And I want to have multiple series of numbers (random1 & random2) at the same time.
I am not sure how to begin to turn this into an actual Swift class.
Here is a possible implementation. It uses the jrand48 pseudo-random number generator, which produces 32-bit numbers. This PRNG is not as good as arc4random(), but has the advantage that all of its state is stored in a user-supplied array, so that multiple instances can run independently.
import Foundation

struct RandomNumberGenerator {
    // 48-bit internal state for jrand48()
    private var state: [UInt16] = [0, 0, 0]

    // Return a pseudo-random number in the range 0 ... upperBound-1:
    mutating func next(_ upperBound: UInt32) -> UInt32 {
        // Implementation avoiding the "modulo bias" problem,
        // taken from: http://stackoverflow.com/a/10989061/1187415,
        // Swift translation here: http://stackoverflow.com/a/26550169/1187415
        let range = UInt32.max - UInt32.max % upperBound
        var rnd: UInt32
        repeat {
            rnd = UInt32(truncatingIfNeeded: jrand48(&state))
        } while rnd >= range
        return rnd % upperBound
    }

    mutating func seed(_ newSeed: Int) {
        state[0] = UInt16(truncatingIfNeeded: newSeed)
        state[1] = UInt16(truncatingIfNeeded: newSeed >> 16)
        state[2] = UInt16(truncatingIfNeeded: newSeed >> 32)
    }
}
Example:
var rnd1 = RandomNumberGenerator()
rnd1.seed(7)
var rnd2 = RandomNumberGenerator()
rnd2.seed(12)
print(rnd1.next(10)) // 2
print(rnd1.next(10)) // 8
print(rnd1.next(10)) // 1
print(rnd2.next(10)) // 6
print(rnd2.next(10)) // 0
print(rnd2.next(10)) // 5
If rnd1 is seeded with the same value as above, then it produces the same numbers again:
rnd1.seed(7)
print(rnd1.next(10)) // 2
print(rnd1.next(10)) // 8
print(rnd1.next(10)) // 1
What you need is a singleton that generates pseudo-random numbers, with all the code that needs a random number calling through this class. The trick is to reset the seed for each run of your code. Here is a simple RandomNumberGenerator class that will do the trick for you (it's optimized for speed, which is a good thing when writing games):
import Foundation

// This random number generator comes from: Klimov, A. and Shamir, A.,
// "A New Class of Invertible Mappings", Cryptographic Hardware and Embedded
// Systems 2002, http://dl.acm.org/citation.cfm?id=752741
//
// Very fast, very simple, and passes Diehard and other good statistical
// tests as strongly as cryptographically-secure random number generators (but
// is not itself cryptographically-secure).
class RandomNumberGenerator {
    static let sharedInstance = RandomNumberGenerator()

    private init(seed: UInt64 = 12347) {
        self.seed = seed
    }

    func nextInt() -> Int {
        return next(32)
    }

    private func isPowerOfTwo(_ x: Int) -> Bool { return x != 0 && ((x & (x - 1)) == 0) }

    func nextInt(max: Int) -> Int {
        assert(!(max < 0))
        // Fast path if max is a power of 2.
        if isPowerOfTwo(max) {
            return Int((Int64(max) * Int64(next(31))) >> 31)
        }
        while true {
            let rnd = next(31)
            let val = rnd % max
            if rnd - val + (max - 1) >= 0 {
                return val
            }
        }
    }

    func nextBool() -> Bool {
        return next(1) != 0
    }

    func nextDouble() -> Double {
        return Double((Int64(next(26)) << 27) + Int64(next(27))) /
            Double(Int64(1) << 53)
    }

    func nextInt64() -> Int64 {
        let lo = UInt64(next(32))
        let hi = UInt64(next(32))
        // Widen before shifting, and use bitPattern so values with the top
        // bit set don't trap when converting to Int64.
        return Int64(bitPattern: lo | (hi << 32))
    }

    func nextBytes(_ buffer: inout [UInt8]) {
        for n in 0..<buffer.count {
            buffer[n] = UInt8(next(8))
        }
    }

    var seed: UInt64 {
        get {
            return _seed
        }
        set(seed) {
            _initialSeed = seed
            _seed = seed
        }
    }

    var initialSeed: UInt64 {
        return _initialSeed!
    }

    private func randomNumber() -> UInt32 {
        _seed = _seed &+ ((_seed &* _seed) | 5)
        return UInt32(_seed >> 32)
    }

    private func next(_ bits: Int) -> Int {
        assert(bits > 0)
        assert(!(bits > 32))
        return Int(randomNumber() >> UInt32(32 - bits))
    }

    private var _initialSeed: UInt64?
    private var _seed: UInt64 = 0
}
