CGDataProvider returning null in Swift 4 - ios

I'm trying to fix a problem in SwiftColorPicker (by Matthias Schlemm) that popped up with Swift 4.0. I'm going to include the whole PickerImage class, so you can see the full context.
import UIKit
import ImageIO
open class PickerImage {
var provider:CGDataProvider!
var imageSource:CGImageSource?
var image:UIImage?
var mutableData:CFMutableData
var width:Int
var height:Int
fileprivate func createImageFromData(_ width:Int, height:Int) {
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
let provider = CGDataProvider(data: mutableData)
imageSource = CGImageSourceCreateWithDataProvider(provider!, nil)
let cgimg = CGImage(width: Int(width), height: Int(height), bitsPerComponent: Int(8), bitsPerPixel: Int(32), bytesPerRow: Int(width) * Int(4),
space: colorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
image = UIImage(cgImage: cgimg!)
}
func changeSize(_ width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
CFDataSetLength(mutableData, size)
createImageFromData(width, height: height)
}
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
createImageFromData(width, height: height)
}
open func writeColorData(_ h:CGFloat, a:CGFloat) {
let d = CFDataGetMutableBytePtr(self.mutableData)
if width == 0 || height == 0 {
return
}
var i:Int = 0
let h360:CGFloat = ((h == 1 ? 0 : h) * 360) / 60.0
let sector:Int = Int(floor(h360))
let f:CGFloat = h360 - CGFloat(sector)
let f1:CGFloat = 1.0 - f
var p:CGFloat = 0.0
var q:CGFloat = 0.0
var t:CGFloat = 0.0
let sd:CGFloat = 1.0 / CGFloat(width)
let vd:CGFloat = 1 / CGFloat(height)
var double_s:CGFloat = 0
var pf:CGFloat = 0
let v_range = 0..<height
let s_range = 0..<width
for v in v_range {
pf = 255 * CGFloat(v) * vd
for s in s_range {
i = (v * width + s) * 4
d?[i] = UInt8(255)
if s == 0 {
q = pf
d?[i+1] = UInt8(q)
d?[i+2] = UInt8(q)
d?[i+3] = UInt8(q)
continue
}
double_s = CGFloat(s) * sd
p = pf * (1.0 - double_s)
q = pf * (1.0 - double_s * f)
t = pf * ( 1.0 - double_s * f1)
switch(sector) {
case 0:
d?[i+1] = UInt8(pf)
d?[i+2] = UInt8(t)
d?[i+3] = UInt8(p)
case 1:
d?[i+1] = UInt8(q)
d?[i+2] = UInt8(pf)
d?[i+3] = UInt8(p)
case 2:
d?[i+1] = UInt8(p)
d?[i+2] = UInt8(pf)
d?[i+3] = UInt8(t)
case 3:
d?[i+1] = UInt8(p)
d?[i+2] = UInt8(q)
d?[i+3] = UInt8(pf)
case 4:
d?[i+1] = UInt8(t)
d?[i+2] = UInt8(p)
d?[i+3] = UInt8(pf)
default:
d?[i+1] = UInt8(pf)
d?[i+2] = UInt8(p)
d?[i+3] = UInt8(q)
}
}
}
}
}
In createImageFromData, the line
let provider = CGDataProvider(data: mutableData)
is returning a nil value, which, of course, causes the following line to crash. This was working fine in Swift 3.
Here are the values in the debugger:
Dealing with memory allocation is a bit beyond my current skillset, so I'm struggling with what's actually going on here. Has anything related to this changed in Swift 4.0 that would cause the CGDataProvider call to return a nil value?
Edit:
Here is the ColorPicker class that initializes the PickerImage objects.
import UIKit
import ImageIO
open class ColorPicker: UIView {
fileprivate var pickerImage1:PickerImage?
fileprivate var pickerImage2:PickerImage?
fileprivate var image:UIImage?
fileprivate var data1Shown = false
fileprivate lazy var opQueue:OperationQueue = {return OperationQueue()}()
fileprivate var lock:NSLock = NSLock()
fileprivate var rerender = false
open var onColorChange:((_ color:UIColor, _ finished:Bool)->Void)? = nil
open var a:CGFloat = 1 {
didSet {
if a < 0 || a > 1 {
a = max(0, min(1, a))
}
}
}
open var h:CGFloat = 0 { // // [0,1]
didSet {
if h > 1 || h < 0 {
h = max(0, min(1, h))
}
renderBitmap()
setNeedsDisplay()
}
}
fileprivate var currentPoint:CGPoint = CGPoint.zero
open func saturationFromCurrentPoint() -> CGFloat {
return (1 / bounds.width) * currentPoint.x
}
open func brigthnessFromCurrentPoint() -> CGFloat {
return (1 / bounds.height) * currentPoint.y
}
open var color:UIColor {
set(value) {
var hue:CGFloat = 1
var saturation:CGFloat = 1
var brightness:CGFloat = 1
var alpha:CGFloat = 1
value.getHue(&hue, saturation: &saturation, brightness: &brightness, alpha: &alpha)
a = alpha
if hue != h || pickerImage1 === nil {
self.h = hue
}
currentPoint = CGPoint(x: saturation * bounds.width, y: brightness * bounds.height)
self.setNeedsDisplay()
}
get {
return UIColor(hue: h, saturation: saturationFromCurrentPoint(), brightness: brigthnessFromCurrentPoint(), alpha: a)
}
}
public override init(frame: CGRect) {
super.init(frame: frame)
commonInit()
}
public required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
commonInit()
}
func commonInit() {
isUserInteractionEnabled = true
clipsToBounds = false
self.addObserver(self, forKeyPath: "bounds", options: [NSKeyValueObservingOptions.new, NSKeyValueObservingOptions.initial], context: nil)
}
deinit {
self.removeObserver(self, forKeyPath: "bounds")
}
open override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
if keyPath == "bounds" {
if let pImage1 = pickerImage1 {
pImage1.changeSize(Int(self.bounds.width), height: Int(self.bounds.height))
}
if let pImage2 = pickerImage2 {
pImage2.changeSize(Int(self.bounds.width), height: Int(self.bounds.height))
}
renderBitmap()
self.setNeedsDisplay()
} else {
super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
}
}
open override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: false)
}
open override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: false)
}
open override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: true)
}
fileprivate func handleColorChange(_ color:UIColor, changing:Bool) {
if color !== self.color {
if let handler = onColorChange {
handler(color, !changing)
}
setNeedsDisplay()
}
}
fileprivate func handleTouche(_ touch:UITouch, ended:Bool) {
// set current point
let point = touch.location(in: self)
if self.bounds.contains(point) {
currentPoint = point
} else {
let x:CGFloat = min(bounds.width, max(0, point.x))
let y:CGFloat = min(bounds.width, max(0, point.y))
currentPoint = CGPoint(x: x, y: y)
}
handleColorChange(pointToColor(point), changing: !ended)
}
fileprivate func pointToColor(_ point:CGPoint) ->UIColor {
let s:CGFloat = min(1, max(0, (1.0 / bounds.width) * point.x))
let b:CGFloat = min(1, max(0, (1.0 / bounds.height) * point.y))
return UIColor(hue: h, saturation: s, brightness: b, alpha:a)
}
fileprivate func renderBitmap() {
if self.bounds.isEmpty {
return
}
if !lock.try() {
rerender = true
return
}
rerender = false
if pickerImage1 == nil {
self.pickerImage1 = PickerImage(width: Int(bounds.width), height: Int(bounds.height))
self.pickerImage2 = PickerImage(width: Int(bounds.width), height: Int(bounds.height))
}
opQueue.addOperation { () -> Void in
// Write colors to data array
if self.data1Shown { self.pickerImage2!.writeColorData(self.h, a:self.a) }
else { self.pickerImage1!.writeColorData(self.h, a:self.a)}
// flip images
// self.image = self.data1Shown ? self.pickerImage2!.image! : self.pickerImage1!.image!
self.data1Shown = !self.data1Shown
// make changes visible
OperationQueue.main.addOperation({ () -> Void in
self.setNeedsDisplay()
self.lock.unlock()
if self.rerender {
self.renderBitmap()
}
})
}
}
open override func draw(_ rect: CGRect) {
if let img = image {
img.draw(in: rect)
}
//// Oval Drawing
let ovalPath = UIBezierPath(ovalIn: CGRect(x: currentPoint.x - 5, y: currentPoint.y - 5, width: 10, height: 10))
UIColor.white.setStroke()
ovalPath.lineWidth = 1
ovalPath.stroke()
//// Oval 2 Drawing
let oval2Path = UIBezierPath(ovalIn: CGRect(x: currentPoint.x - 4, y: currentPoint.y - 4, width: 8, height: 8))
UIColor.black.setStroke()
oval2Path.lineWidth = 1
oval2Path.stroke()
}
}

It's not a problem with Swift 4, but with iOS 11. You may find that your code works on the iOS 10 simulator.
The original code seemingly worked on iOS 10 only by luck.
In this part of the code:
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
createImageFromData(width, height: height)
}
The property mutableData is initialized as a CFMutableData with a capacity of size, but it is empty (its length is 0).
And in iOS 11, the initializer CGDataProvider.init(data:) rejects an empty CFData, since a data provider should not be backed by empty data.
A quick fix would be something like this:
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
CFDataSetLength(mutableData, size) //<-set the length of the data
createImageFromData(width, height: height)
}
But I'm not sure other parts of the code would work as expected in iOS 11.
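If you want to see the difference in isolation, here is a small sketch (my own illustration, not part of the project) showing that the capacity passed to CFDataCreateMutable is not a length, and that setting the length is what lets the provider initializer succeed on iOS 11:
// Illustration only: a capacity-only CFMutableData has length 0.
let size = 4 * 4 * 4 // e.g. a 4x4 RGBA bitmap
let data: CFMutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
print(CFDataGetLength(data))             // 0 - capacity is not length
print(CGDataProvider(data: data) as Any) // nil on iOS 11, because the data is empty
CFDataSetLength(data, size)              // give the provider actual bytes to read
print(CGDataProvider(data: data) as Any) // non-nil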

The problem is that the let on that line creates a new local constant called provider inside createImageFromData, so it is never assigned to the class's provider property, which therefore stays nil. Just remove the let and it should work properly.
fileprivate func createImageFromData(_ width:Int, height:Int) {
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
provider = CGDataProvider(data: mutableData)
imageSource = CGImageSourceCreateWithDataProvider(provider!, nil)
let cgimg = CGImage(width: Int(width), height: Int(height), bitsPerComponent: Int(8), bitsPerPixel: Int(32), bytesPerRow: Int(width) * Int(4),
space: colorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
image = UIImage(cgImage: cgimg!)
}

Related

Swipeable CIFilter over video

I am currently trying to implement something similar to Instagram's story feature where you take a picture or a video, and when swiping left or right you change the current filter over the content. (Here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA)
As you can see in the example, I got it working for images, but now my problem is that I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Return the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns CIImage in fitted to main screen bounds.
func renderedCIIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These 3 are the classes that do the magic for the image part. Does anyone have a suggestion or a starting point for this? I tried looking over at https://github.com/rFlex/SCRecorder but I get a bit lost in Obj-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampingToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropping(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)!
export.outputFileType = AVFileTypeQuickTimeMovie
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronously(completionHandler: { /*...*/ })
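The playback route mentioned above is just as short. A minimal sketch of mine, assuming the asset and composition from the snippet above:
// Playback sketch: attach the filtered composition to a player item.
let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition
let player = AVPlayer(playerItem: playerItem)
// Present `player` via an AVPlayerLayer or AVPlayerViewController, then:
player.play()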
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.

Determining if custom iOS views overlap

I've defined a CircleView class:
class CircleView: UIView {
override init(frame: CGRect) {
super.init(frame: frame)
backgroundColor = UIColor.clear
}
required init(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func draw(_ rect: CGRect) {
// Get the Graphics Context
if let context = UIGraphicsGetCurrentContext() {
// Set the circle outerline-width
context.setLineWidth(5.0);
// Set the circle outerline-colour
UIColor.blue.set()
// Create Circle
let center = CGPoint(x: frame.size.width/2, y: frame.size.height/2)
let radius = (frame.size.width - 10)/2
context.addArc(center: center, radius: radius, startAngle: 0.0, endAngle: .pi * 2.0, clockwise: true)
context.setFillColor(UIColor.blue.cgColor)
// Draw
context.strokePath()
context.fillPath()
}
}
}
And created an array of them with a randomly set number:
var numberOfCircles: Int!
var circles: [CircleView] = []
numberOfCircles = Int.random(in: 1..<10)
let circleWidth = CGFloat(50)
let circleHeight = circleWidth
var i = 0
while i < numberOfCircles {
let circleView = CircleView(frame: CGRect(x: 0.0, y: 0.0, width: circleWidth, height: circleHeight))
circles.append(circleView)
i += 1
}
After creating the circles, I call a function, drawCircles, that will draw them on the screen:
func drawCircles(){
for c in circles {
c.frame.origin = c.frame.randomPoint
while !UIScreen.main.bounds.contains(c.frame.origin) {
c.frame.origin = CGPoint()
c.frame.origin = c.frame.randomPoint
let prev = circles.before(c)
if prev?.frame.intersects(c.frame) == true {
c.frame.origin = c.frame.randomPoint
}
}
}
for c in circles {
self.view.addSubview(c)
}
}
The while loop in the drawCircles method makes sure that no circles are placed outside of the bounds of the screen, and works as expected.
What I'm struggling with is to make sure that the circles don't overlap each other, like so:
I'm using the following extension methods to determine the previous / next element in the array of circles:
extension BidirectionalCollection where Iterator.Element: Equatable {
typealias Element = Self.Iterator.Element
func after(_ item: Element, loop: Bool = false) -> Element? {
if let itemIndex = self.firstIndex(of: item) {
let lastItem: Bool = (index(after:itemIndex) == endIndex)
if loop && lastItem {
return self.first
} else if lastItem {
return nil
} else {
return self[index(after:itemIndex)]
}
}
return nil
}
func before(_ item: Element, loop: Bool = false) -> Element? {
if let itemIndex = self.firstIndex(of: item) {
let firstItem: Bool = (itemIndex == startIndex)
if loop && firstItem {
return self.last
} else if firstItem {
return nil
} else {
return self[index(before:itemIndex)]
}
}
return nil
}
}
This if statement, however, doesn't seem to do what I want, which is to give a circle a new origin if it intersects with another one:
if prev?.frame.intersects(c.frame) == true {
c.frame.origin = c.frame.randomPoint
}
If anyone has any ideas about where the logic is going wrong, or other ideas on how to make sure that the circles don't overlap with each other, that would be helpful!
EDIT: I did try the suggestion that Eugene gave in his answer like so, but still get the same result:
func distance(_ a: CGPoint, _ b: CGPoint) -> CGFloat {
let xDist = a.x - b.x
let yDist = a.y - b.y
return CGFloat(sqrt(xDist * xDist + yDist * yDist))
}
if prev != nil {
if distance((prev?.frame.origin)!, c.frame.origin) <= 40 {
print("2")
c.frame.origin = CGPoint()
c.frame.origin = c.frame.randomPoint
}
}
But still the same result
EDIT 2
Modified my for loop based on Eugene's edited answer / clarifications; still having issues with overlapping circles:
for c in circles {
c.frame.origin = c.frame.randomPoint
let prev = circles.before(c)
let viewMidX = self.circlesView.bounds.midX
let viewMidY = self.circlesView.bounds.midY
let xPosition = self.circlesView.frame.midX - viewMidX + CGFloat(arc4random_uniform(UInt32(viewMidX*2)))
let yPosition = self.circlesView.frame.midY - viewMidY + CGFloat(arc4random_uniform(UInt32(viewMidY*2)))
if let prev = prev {
if distance(prev.center, c.center) <= 50 {
c.center = CGPoint(x: xPosition, y: yPosition)
}
}
}
That's a purely geometric challenge. Just ensure that the distance between the circle centers is greater than or equal to the sum of their radii.
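In code, that check could look something like the sketch below (my own, assuming each circle view is as wide as the circle it draws):
import UIKit

// Sketch: two circle views overlap when the distance between their centers
// is less than the sum of their radii (half of each view's width).
func circlesOverlap(_ a: UIView, _ b: UIView) -> Bool {
    let minDistance = a.bounds.width / 2 + b.bounds.width / 2
    return hypot(a.center.x - b.center.x, a.center.y - b.center.y) < minDistance
}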
Edit 1
Use UIView.center instead of UIView.frame.origin. UIView.frame.origin gives you the top-left corner of the UIView.
if let prev = prev {
if distance(prev.center, c.center) <= 50 {
print("2")
c.center = ...
}
}
Edit 2
func distance(_ a: CGPoint, _ b: CGPoint) -> CGFloat {
let xDist = a.x - b.x
let yDist = a.y - b.y
return CGFloat(hypot(xDist, yDist))
}
let prev = circles.before(c)
if let prevCircleCenter = prev?.center {
let dist = distance(prevCircleCenter, c.center)
if dist <= 50 {
let viewMidX = c.bounds.midX
let viewMidY = c.bounds.midY
var newCenter = c.center
var centersVector = CGVector(dx: newCenter.x - prevCircleCenter.x, dy: newCenter.y - prevCircleCenter.y)
centersVector.dx *= 51 / dist
centersVector.dy *= 51 / dist
newCenter.x = prevCircleCenter.x + centersVector.dx
newCenter.y = prevCircleCenter.y + centersVector.dy
c.center = newCenter
}
}
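Note that to guarantee no overlaps at all, the candidate position has to be checked against every circle placed so far, not just the previous one. A rough placement sketch of mine (assuming 50-point circles and a container view large enough to hold them):
// Sketch: re-roll a random center until the candidate keeps at least one
// diameter (50 points) of distance from every circle placed so far.
// A real implementation should cap the number of attempts.
func place(_ circles: [CircleView], in container: UIView) {
    var placed: [CircleView] = []
    for c in circles {
        repeat {
            c.center = CGPoint(x: CGFloat.random(in: 25...(container.bounds.width - 25)),
                               y: CGFloat.random(in: 25...(container.bounds.height - 25)))
        } while placed.contains(where: { hypot($0.center.x - c.center.x,
                                               $0.center.y - c.center.y) < 50 })
        placed.append(c)
        container.addSubview(c)
    }
}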

How to make Circular audio visualizer in swift?

I want to make a visualizer like this Circular visualizer (click the green flag to see the animation).
In my project I first draw a circle, calculate the points on the circle where the visualizer bars go, and rotate each bar view so the bars follow the circle. I use StreamingKit to stream live radio; StreamingKit provides the live audio power in decibels. Then I animate the visualizer bars. But when I rotate a view, its frame's height and width change according to the rotation angle, while its bounds do not change (I know the frame depends on the superview).
audioSpectrom Class
class audioSpectrom: UIView {
let animateDuration = 0.15
let visualizerColor = #colorLiteral(red: 1, green: 1, blue: 1, alpha: 1)
var barsNumber = 0
let barWidth = 4 // width of bar
let radius: CGFloat = 40
var radians = [CGFloat]()
var barPoints = [CGPoint]()
private var rectArray = [CustomView]()
private var waveFormArray = [Int]()
private var initialBarHeight: CGFloat = 0.0
private let mainLayer: CALayer = CALayer()
// draw circle
var midViewX: CGFloat!
var midViewY: CGFloat!
var circlePath = UIBezierPath()
override init(frame: CGRect) {
super.init(frame: frame)
setupView()
}
convenience init() {
self.init(frame: CGRect.zero)
setupView()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
setupView()
}
private func setupView() {
self.layer.addSublayer(mainLayer)
barsNumber = 10
}
override func layoutSubviews() {
mainLayer.frame = CGRect(x: 0, y: 0, width: frame.width, height: frame.height)
drawVisualizer()
}
//-----------------------------------------------------------------
// MARK: - Drawing Section
//-----------------------------------------------------------------
func drawVisualizer() {
midViewX = self.mainLayer.frame.midX
midViewY = self.mainLayer.frame.midY
// Draw Circle
let arcCenter = CGPoint(x: midViewX, y: midViewY)
let circlePath = UIBezierPath(arcCenter: arcCenter, radius: radius, startAngle: 0, endAngle: CGFloat(Double.pi * 2), clockwise: true)
let circleShapeLayer = CAShapeLayer()
circleShapeLayer.path = circlePath.cgPath
circleShapeLayer.fillColor = UIColor.blue.cgColor
circleShapeLayer.strokeColor = UIColor.clear.cgColor
circleShapeLayer.lineWidth = 1.0
mainLayer.addSublayer(circleShapeLayer)
// Draw Bars
rectArray = [CustomView]()
for i in 0..<barsNumber {
let angle = ((360 / barsNumber) * i) - 90
let point = calculatePoints(angle: angle, radius: radius)
let radian = angle.degreesToRadians
radians.append(radian)
barPoints.append(point)
let rectangle = CustomView(frame: CGRect(x: barPoints[i].x, y: barPoints[i].y, width: CGFloat(barWidth), height: CGFloat(barWidth)))
initialBarHeight = CGFloat(self.barWidth)
rectangle.setAnchorPoint(anchorPoint: CGPoint.zero)
let rotationAngle = (CGFloat(( 360/barsNumber) * i)).degreesToRadians + 180.degreesToRadians
rectangle.transform = CGAffineTransform(rotationAngle: rotationAngle)
rectangle.backgroundColor = visualizerColor
rectangle.layer.cornerRadius = CGFloat(rectangle.bounds.width / 2)
rectangle.tag = i
self.addSubview(rectangle)
rectArray.append(rectangle)
var values = [5, 10, 15, 10, 5, 1]
waveFormArray = [Int]()
var j: Int = 0
for _ in 0..<barsNumber {
waveFormArray.append(values[j])
j += 1
if j == values.count {
j = 0
}
}
}
}
//-----------------------------------------------------------------
// MARK: - Animation Section
//-----------------------------------------------------------------
func animateAudioVisualizerWithChannel(level0: Float, level1: Float ) {
DispatchQueue.main.async {
UIView.animateKeyframes(withDuration: self.animateDuration, delay: 0, options: .beginFromCurrentState, animations: {
for i in 0..<self.barsNumber {
let channelValue: Int = Int(arc4random_uniform(2))
let wavePeak: Int = Int(arc4random_uniform(UInt32(self.waveFormArray[i])))
let barView = self.rectArray[i] as? CustomView
guard var barFrame = barView?.frame else { return }
// calculate the bar height
let barH = (self.frame.height / 2 ) - self.radius
// scale the value to 40, input value of this func range from 0-60, 60 is low and 0 is high. Then calculate the height by minimise the scaled height from bar height.
let scaled0 = (CGFloat(level0) * barH) / 60
let scaled1 = (CGFloat(level1) * barH) / 60
let calc0 = barH - scaled0
let calc1 = barH - scaled1
if channelValue == 0 {
barFrame.size.height = calc0
} else {
barFrame.size.height = calc1
}
if barFrame.size.height < 4 || barFrame.size.height > ((self.frame.size.height / 2) - self.radius) {
barFrame.size.height = self.initialBarHeight + CGFloat(wavePeak)
}
barView?.frame = barFrame
}
}, completion: nil)
}
}
func calculatePoints(angle: Int, radius: CGFloat) -> CGPoint {
let barX = midViewX + cos((angle).degreesToRadians) * radius
let barY = midViewY + sin((angle).degreesToRadians) * radius
return CGPoint(x: barX, y: barY)
}
}
extension BinaryInteger {
var degreesToRadians: CGFloat { return CGFloat(Int(self)) * .pi / 180 }
}
extension FloatingPoint {
var degreesToRadians: Self { return self * .pi / 180 }
var radiansToDegrees: Self { return self * 180 / .pi }
}
extension UIView{
func setAnchorPoint(anchorPoint: CGPoint) {
var newPoint = CGPoint(x: self.bounds.size.width * anchorPoint.x, y: self.bounds.size.height * anchorPoint.y)
var oldPoint = CGPoint(x: self.bounds.size.width * self.layer.anchorPoint.x, y: self.bounds.size.height * self.layer.anchorPoint.y)
newPoint = newPoint.applying(self.transform)
oldPoint = oldPoint.applying(self.transform)
var position : CGPoint = self.layer.position
position.x -= oldPoint.x
position.x += newPoint.x;
position.y -= oldPoint.y;
position.y += newPoint.y;
self.layer.position = position;
self.layer.anchorPoint = anchorPoint;
}
}
I dragged an empty view into the storyboard and set its custom class to audioSpectrom.
ViewController
func startAudioVisualizer() {
visualizerTimer?.invalidate()
visualizerTimer = nil
visualizerTimer = Timer.scheduledTimer(timeInterval: visualizerAnimationDuration, target: self, selector: #selector(self.visualizerTimerFunc), userInfo: nil, repeats: true)
}
@objc func visualizerTimerFunc(_ timer: CADisplayLink) {
let lowResults = self.audioPlayer!.averagePowerInDecibels(forChannel: 0)
let lowResults1 = self.audioPlayer!.averagePowerInDecibels(forChannel: 1)
audioSpectrom.animateAudioVisualizerWithChannel(level0: -lowResults, level1: -lowResults1)
}
OUTPUT
Without animation
With animation
In my observation, the frame's height and width change when the view rotates. That is, when I give a bar CGSize(width: 4, height: 4) and then rotate it by some angle, the frame size changes to something like CGSize(width: 3.563456, height: 5.67849) (the values are just an example, not exact).
How to resolve this problem?
Any suggestions or answers will be appreciated.
Edit
func animateAudioVisualizerWithChannel(level0: Float, level1: Float ) {
DispatchQueue.main.async {
UIView.animateKeyframes(withDuration: self.animateDuration, delay: 0, options: .beginFromCurrentState, animations: {
for i in 0..<self.barsNumber {
let channelValue: Int = Int(arc4random_uniform(2))
let wavePeak: Int = Int(arc4random_uniform(UInt32(self.waveFormArray[i])))
var barView = self.rectArray[i] as? CustomView
guard let barViewUn = barView else { return }
let barH = (self.frame.height / 2 ) - self.radius
let scaled0 = (CGFloat(level0) * barH) / 60
let scaled1 = (CGFloat(level1) * barH) / 60
let calc0 = barH - scaled0
let calc1 = barH - scaled1
let kSavedTransform = barViewUn.transform
barViewUn.transform = .identity
if channelValue == 0 {
barViewUn.frame.size.height = calc0
} else {
barViewUn.frame.size.height = calc1
}
if barViewUn.frame.height < CGFloat(4) || barViewUn.frame.height > ((self.frame.size.height / 2) - self.radius) {
barViewUn.frame.size.height = self.initialBarHeight + CGFloat(wavePeak)
}
barViewUn.transform = kSavedTransform
barView = barViewUn
}
}, completion: nil)
}
}
Output
The output looks like this animated GIF: https://i.imgflip.com/227xsa.gif
GOT IT!!
circular-visualizer
There are two (maybe three) issues in your code:
1. audioSpectrom.layoutSubviews()
You create new views in layoutSubviews and add them to the view hierarchy. This is not what you intend to do, because layoutSubviews is called multiple times and you should use it only for layout purposes.
As a dirty work-around, I modified the code in the func drawVisualizer to only add the bars once:
func drawVisualizer() {
// ... some code here
// ...
mainLayer.addSublayer(circleShapeLayer)
// This will ensure to only add the bars once:
guard rectArray.count == 0 else { return } // If we already have bars, just return
// Draw Bars
rectArray = [CustomView]()
// ... Rest of the func
}
Now it almost looks good, but there are still some rendering artifacts with the topmost bar. So you'll also have to change
2. audioSpectrom.animateAudioVisualizerWithChannel(level0:level1:)
Here, you want to recalculate the frame of the bars. Since they are rotated, the frame is also rotated, and you'd have to apply some mathematical tricks. To avoid this and make your life easier, you save the rotated transform, set it to .identity, modify the frame, and then restore the original rotated transform. Unfortunately, this causes some rendering artifacts at rotations of 0 or 2pi, maybe caused by rounding issues. Never mind, there is a much simpler solution:
Instead of modifying the frame, you should modify the bounds.
frame is measured in the outer (in your case: rotated) coordinate system
bounds is measured in the inner (non-transformed) coordinate system
So I simply replaced all the frames with bounds in the function animateAudioVisualizerWithChannel and also removed the saving and restoring of the transformation matrix:
func animateAudioVisualizerWithChannel(level0: Float, level1: Float ) {
// some code before
guard let barViewUn = barView else { return }
let barH = (self.bounds.height / 2 ) - self.radius
let scaled0 = (CGFloat(level0) * barH) / 60
let scaled1 = (CGFloat(level1) * barH) / 60
let calc0 = barH - scaled0
let calc1 = barH - scaled1
if channelValue == 0 {
barViewUn.bounds.size.height = calc0
} else {
barViewUn.bounds.size.height = calc1
}
if barViewUn.bounds.height < CGFloat(4) || barViewUn.bounds.height > ((self.bounds.height / 2) - self.radius) {
barViewUn.bounds.size.height = self.initialBarHeight + CGFloat(wavePeak)
}
barView = barViewUn
// some code after
}
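The frame/bounds difference is easy to see in isolation; a small illustration of mine (the printed values are approximate):
// frame reflects the rotated bounding box; bounds keeps the view's own size.
let bar = UIView(frame: CGRect(x: 0, y: 0, width: 4, height: 20))
bar.transform = CGAffineTransform(rotationAngle: .pi / 4)
print(bar.frame.size)  // about (16.97, 16.97) - the rotated bounding box
print(bar.bounds.size) // (4.0, 20.0) - unchanged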
3. Warnings
By the way, you should get rid of all the warnings in your code. I didn't clean up my answer code to keep it comparable with the original code.
For example, in var barView = self.rectArray[i] as? CustomView you don't need the conditional cast, because the array already contains CustomView objects.
So, all the barViewUn stuff is unnecessary.
There is much more to find and clean up.
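As an example of that cleanup, the loop body in animateAudioVisualizerWithChannel could shrink to something like this (my sketch, reusing calc0/calc1/channelValue/wavePeak from the answer code above):
let barView = self.rectArray[i] // already a CustomView, no cast or unwrapping needed
barView.bounds.size.height = channelValue == 0 ? calc0 : calc1
if barView.bounds.height < 4 || barView.bounds.height > (self.bounds.height / 2) - self.radius {
    barView.bounds.size.height = self.initialBarHeight + CGFloat(wavePeak)
}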

How to implement range slider in Swift

I'm trying to implement a range slider, and I used a custom control called NMRangeSlider.
But when I use it, the slider doesn't appear at all. Could it also be because it's all written in Objective-C?
This is how I've currently implemented it:
var rangeSlider = NMRangeSlider(frame: CGRectMake(16, 6, 275, 34))
rangeSlider.lowerValue = 0.54
rangeSlider.upperValue = 0.94
self.view.addSubview(rangeSlider)
To create a custom range slider I found a good solution here: range finder tutorial iOS 8, but I needed it in Swift 3 for my project. I updated it for Swift 3 / iOS 10 here:
In your main view controller, add this viewDidLayoutSubviews override to show a range slider.
override func viewDidLayoutSubviews() {
let margin: CGFloat = 20.0
let width = view.bounds.width - 2.0 * margin
rangeSlider.frame = CGRect(x: margin, y: margin + topLayoutGuide.length + 170, width: width, height: 31.0)
}
Create the helper function below viewDidLayoutSubviews() to print the slider output:
func rangeSliderValueChanged() {
print("Range slider value changed: \(rangeSlider.lowerValue) \(rangeSlider.upperValue)")
}
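One step not shown in the excerpt above is wiring the handler to the slider. Presumably something like the line below, e.g. in viewDidLoad of the view controller that owns rangeSlider (in Swift 4 the handler would also need to be marked @objc for #selector to work):
rangeSlider.addTarget(self, action: #selector(rangeSliderValueChanged), for: .valueChanged)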
Create the file RangeSlider.swift and add this to it:
import UIKit
import QuartzCore
class RangeSlider: UIControl {
var minimumValue = 0.0
var maximumValue = 1.0
var lowerValue = 0.2
var upperValue = 0.8
let trackLayer = RangeSliderTrackLayer()//= CALayer() defined in RangeSliderTrackLayer.swift
let lowerThumbLayer = RangeSliderThumbLayer()//CALayer()
let upperThumbLayer = RangeSliderThumbLayer()//CALayer()
var previousLocation = CGPoint()
var trackTintColor = UIColor(white: 0.9, alpha: 1.0)
var trackHighlightTintColor = UIColor(red: 0.0, green: 0.45, blue: 0.94, alpha: 1.0)
var thumbTintColor = UIColor.white
var curvaceousness : CGFloat = 1.0
var thumbWidth: CGFloat {
return CGFloat(bounds.height)
}
override init(frame: CGRect) {
super.init(frame: frame)
trackLayer.rangeSlider = self
trackLayer.contentsScale = UIScreen.main.scale
layer.addSublayer(trackLayer)
lowerThumbLayer.rangeSlider = self
lowerThumbLayer.contentsScale = UIScreen.main.scale
layer.addSublayer(lowerThumbLayer)
upperThumbLayer.rangeSlider = self
upperThumbLayer.contentsScale = UIScreen.main.scale
layer.addSublayer(upperThumbLayer)
}
required init?(coder: NSCoder) {
super.init(coder: coder)
}
func updateLayerFrames() {
trackLayer.frame = bounds.insetBy(dx: 0.0, dy: bounds.height / 3)
trackLayer.setNeedsDisplay()
let lowerThumbCenter = CGFloat(positionForValue(value: lowerValue))
lowerThumbLayer.frame = CGRect(x: lowerThumbCenter - thumbWidth / 2.0, y: 0.0,
width: thumbWidth, height: thumbWidth)
lowerThumbLayer.setNeedsDisplay()
let upperThumbCenter = CGFloat(positionForValue(value: upperValue))
upperThumbLayer.frame = CGRect(x: upperThumbCenter - thumbWidth / 2.0, y: 0.0,
width: thumbWidth, height: thumbWidth)
upperThumbLayer.setNeedsDisplay()
}
func positionForValue(value: Double) -> Double {
return Double(bounds.width - thumbWidth) * (value - minimumValue) /
(maximumValue - minimumValue) + Double(thumbWidth / 2.0)
}
override var frame: CGRect {
didSet {
updateLayerFrames()
}
}
override func beginTracking(_ touch: UITouch, with event: UIEvent?) -> Bool {
previousLocation = touch.location(in: self)
// Hit test the thumb layers
if lowerThumbLayer.frame.contains(previousLocation) {
lowerThumbLayer.highlighted = true
} else if upperThumbLayer.frame.contains(previousLocation) {
upperThumbLayer.highlighted = true
}
return lowerThumbLayer.highlighted || upperThumbLayer.highlighted
}
func boundValue(value: Double, toLowerValue lowerValue: Double, upperValue: Double) -> Double {
return min(max(value, lowerValue), upperValue)
}
override func continueTracking(_ touch: UITouch, with event: UIEvent?) -> Bool {
let location = touch.location(in: self)
// 1. Determine by how much the user has dragged
let deltaLocation = Double(location.x - previousLocation.x)
let deltaValue = (maximumValue - minimumValue) * deltaLocation / Double(bounds.width - thumbWidth)
previousLocation = location
// 2. Update the values
if lowerThumbLayer.highlighted {
lowerValue += deltaValue
lowerValue = boundValue(value: lowerValue, toLowerValue: minimumValue, upperValue: upperValue)
} else if upperThumbLayer.highlighted {
upperValue += deltaValue
upperValue = boundValue(value: upperValue, toLowerValue: lowerValue, upperValue: maximumValue)
}
// 3. Update the UI
CATransaction.begin()
CATransaction.setDisableActions(true)
updateLayerFrames()
CATransaction.commit()
sendActions(for: .valueChanged)
return true
}
override func endTracking(_ touch: UITouch?, with event: UIEvent?) {
lowerThumbLayer.highlighted = false
upperThumbLayer.highlighted = false
}
}
Next add the thumb layer subclass file RangeSliderThumbLayer.swift and add this to it:
import UIKit
class RangeSliderThumbLayer: CALayer {
var highlighted = false
weak var rangeSlider: RangeSlider?
override func draw(in ctx: CGContext) {
if let slider = rangeSlider {
let thumbFrame = bounds.insetBy(dx: 2.0, dy: 2.0)
let cornerRadius = thumbFrame.height * slider.curvaceousness / 2.0
let thumbPath = UIBezierPath(roundedRect: thumbFrame, cornerRadius: cornerRadius)
// Fill - with a subtle shadow
let shadowColor = UIColor.gray
ctx.setShadow(offset: CGSize(width: 0.0, height: 1.0), blur: 1.0, color: shadowColor.cgColor)
ctx.setFillColor(slider.thumbTintColor.cgColor)
ctx.addPath(thumbPath.cgPath)
ctx.fillPath()
// Outline
ctx.setStrokeColor(shadowColor.cgColor)
ctx.setLineWidth(0.5)
ctx.addPath(thumbPath.cgPath)
ctx.strokePath()
if highlighted {
ctx.setFillColor(UIColor(white: 0.0, alpha: 0.1).cgColor)
ctx.addPath(thumbPath.cgPath)
ctx.fillPath()
}
}
}
}
Finally add the track layer subclass file RangeSliderTrackLayer.swift and add the following to it:
import Foundation
import UIKit
import QuartzCore
class RangeSliderTrackLayer: CALayer {
weak var rangeSlider: RangeSlider?
override func draw(in ctx: CGContext) {
if let slider = rangeSlider {
// Clip
let cornerRadius = bounds.height * slider.curvaceousness / 2.0
let path = UIBezierPath(roundedRect: bounds, cornerRadius: cornerRadius)
ctx.addPath(path.cgPath)
// Fill the track
ctx.setFillColor(slider.trackTintColor.cgColor)
ctx.addPath(path.cgPath)
ctx.fillPath()
// Fill the highlighted range
ctx.setFillColor(slider.trackHighlightTintColor.cgColor)
let lowerValuePosition = CGFloat(slider.positionForValue(value: slider.lowerValue))
let upperValuePosition = CGFloat(slider.positionForValue(value: slider.upperValue))
let rect = CGRect(x: lowerValuePosition, y: 0.0, width: upperValuePosition - lowerValuePosition, height: bounds.height)
ctx.fill(rect)
}
}
}
Build, run, and get:
UPDATE:
It did not show for me because it was all white. So the solution, without using any other framework and sticking with this one, is to set up all the views for all the components; then it will display well:
I had tried to import it in Swift as I had used it before in Objective-C code, but without any luck. If I set everything properly and add it either in viewDidLoad() or viewDidAppear(), nothing gets displayed. One thing is worth mentioning, though: when I enter the View Debug Hierarchy, the slider actually is there on the canvas:
But it's simply not rendered with all the colors that I set before adding it to the view. For the record, this is the code I used:
override func viewDidAppear(animated: Bool) {
var rangeSlider = NMRangeSlider(frame: CGRectMake(50, 50, 275, 34))
rangeSlider.lowerValue = 0.54
rangeSlider.upperValue = 0.94
let range = 10.0
let oneStep = 1.0 / range
let minRange: Float = 0.05
rangeSlider.minimumRange = minRange
let bgImage = UIView(frame: rangeSlider.frame)
bgImage.backgroundColor = .greenColor()
rangeSlider.trackImage = bgImage.pb_takeSnapshot()
let trackView = UIView(frame: CGRectMake(0, 0, rangeSlider.frame.size.width, 29))
trackView.backgroundColor = .whiteColor()
trackView.opaque = false
trackView.alpha = 0.3
rangeSlider.trackImage = UIImage(named: "")
let lowerThumb = UIView(frame: CGRectMake(0, 0, 8, 29))
lowerThumb.backgroundColor = .whiteColor()
let lowerThumbHigh = UIView(frame: CGRectMake(0, 0, 8, 29))
lowerThumbHigh.backgroundColor = UIColor.blueColor()
rangeSlider.lowerHandleImageNormal = lowerThumb.pb_takeSnapshot()
rangeSlider.lowerHandleImageHighlighted = lowerThumbHigh.pb_takeSnapshot()
rangeSlider.upperHandleImageNormal = lowerThumb.pb_takeSnapshot()
rangeSlider.upperHandleImageHighlighted = lowerThumbHigh.pb_takeSnapshot()
self.view.addSubview(rangeSlider)
self.view.backgroundColor = .lightGrayColor()
}
Using the method for capturing the UIView as UIImage mentioned in this question:
extension UIView {
func pb_takeSnapshot() -> UIImage {
UIGraphicsBeginImageContextWithOptions(bounds.size, false, UIScreen.mainScreen().scale)
drawViewHierarchyInRect(self.bounds, afterScreenUpdates: true)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
}
Other solution:
You can also try sgwilly/RangeSlider instead; it's written in Swift, so you won't even need a bridging header.
Try this code:
override func viewDidLayoutSubviews() {
let margin: CGFloat = 20.0
let width = view.bounds.width - 2.0 * margin
rangeSlider.frame = CGRect(x: margin, y: margin + topLayoutGuide.length,
width: width, height: 31.0)
}
I implemented the range slider using:
https://github.com/Zengzhihui/RangeSlider
In the GZRangeSlider class, there is a method called:
private func setLabelText()
In that method, just put:
leftTextLayer.frame = CGRectMake(leftHandleLayer.frame.minX - 0.5 * (kTextWidth - leftHandleLayer.frame.width), leftHandleLayer.frame.minY - kTextHeight, kTextWidth, kTextHeight)
rightTextLayer.frame = CGRectMake(rightHandleLayer.frame.minX - 0.5 * (kTextWidth - leftHandleLayer.frame.width), leftTextLayer.frame.minY, kTextWidth, kTextHeight)
to animate the lower and upper labels.
This one works well for me and it's in Swift; just try it.

iOS voice recorder visualization on swift

I want to make a visualization of the recording like in the original Voice Memos app:
I know I can get the levels with
- updateMeters
- peakPowerForChannel:
- averagePowerForChannel:
but how do I draw the graph? Should I draw it myself? Is there a free or paid source I can use?
I was having the same problem. I wanted to create a Voice Memos clone. Recently, I found a solution and wrote an article about it on Medium.
I created a subclass of UIView and drew the bars with Core Graphics.
import UIKit
class AudioVisualizerView: UIView {
// Bar width
var barWidth: CGFloat = 4.0
// Indicate that waveform should draw active/inactive state
var active = false {
didSet {
if self.active {
self.color = UIColor.red.cgColor
}
else {
self.color = UIColor.gray.cgColor
}
}
}
// Color for bars
var color = UIColor.gray.cgColor
// Given waveforms
var waveforms: [Int] = Array(repeating: 0, count: 100)
// MARK: - Init
override init (frame : CGRect) {
super.init(frame : frame)
self.backgroundColor = UIColor.clear
}
required init?(coder decoder: NSCoder) {
super.init(coder: decoder)
self.backgroundColor = UIColor.clear
}
// MARK: - Draw bars
override func draw(_ rect: CGRect) {
guard let context = UIGraphicsGetCurrentContext() else {
return
}
context.clear(rect)
context.setFillColor(red: 0, green: 0, blue: 0, alpha: 0)
context.fill(rect)
context.setLineWidth(1)
context.setStrokeColor(self.color)
let w = rect.size.width
let h = rect.size.height
let t = Int(w / self.barWidth)
let s = max(0, self.waveforms.count - t)
let m = h / 2
let r = self.barWidth / 2
let x = m - r
var bar: CGFloat = 0
for i in s ..< self.waveforms.count {
var v = h * CGFloat(self.waveforms[i]) / 50.0
if v > x {
v = x
}
else if v < 3 {
v = 3
}
let oneX = bar * self.barWidth
var oneY: CGFloat = 0
let twoX = oneX + r
var twoY: CGFloat = 0
var twoS: CGFloat = 0
var twoE: CGFloat = 0
var twoC: Bool = false
let threeX = twoX + r
let threeY = m
if i % 2 == 1 {
oneY = m - v
twoY = m - v
twoS = -180.degreesToRadians
twoE = 0.degreesToRadians
twoC = false
}
else {
oneY = m + v
twoY = m + v
twoS = 180.degreesToRadians
twoE = 0.degreesToRadians
twoC = true
}
context.move(to: CGPoint(x: oneX, y: m))
context.addLine(to: CGPoint(x: oneX, y: oneY))
context.addArc(center: CGPoint(x: twoX, y: twoY), radius: r, startAngle: twoS, endAngle: twoE, clockwise: twoC)
context.addLine(to: CGPoint(x: threeX, y: threeY))
context.strokePath()
bar += 1
}
}
}
For the recording function, I used the installTap(onBus:bufferSize:format:block:) instance method to record, monitor, and observe the output of the input node.
let inputNode = self.audioEngine.inputNode
guard let format = self.format() else {
return
}
inputNode.installTap(onBus: 0, bufferSize: 1024, format: format) { (buffer, time) in
let level: Float = -50
let length: UInt32 = 1024
buffer.frameLength = length
let channels = UnsafeBufferPointer(start: buffer.floatChannelData, count: Int(buffer.format.channelCount))
var value: Float = 0
vDSP_meamgv(channels[0], 1, &value, vDSP_Length(length))
var average: Float = ((value == 0) ? -100 : 20.0 * log10f(value))
if average > 0 {
average = 0
} else if average < -100 {
average = -100
}
let silent = average < level
let ts = NSDate().timeIntervalSince1970
if ts - self.renderTs > 0.1 {
let floats = UnsafeBufferPointer(start: channels[0], count: Int(buffer.frameLength))
let frame = floats.map({ (f) -> Int in
return Int(f * Float(Int16.max))
})
DispatchQueue.main.async {
let seconds = (ts - self.recordingTs)
self.timeLabel.text = seconds.toTimeString
self.renderTs = ts
let len = self.audioView.waveforms.count
for i in 0 ..< len {
let idx = ((frame.count - 1) * i) / len
let f: Float = sqrt(1.5 * abs(Float(frame[idx])) / Float(Int16.max))
self.audioView.waveforms[i] = min(49, Int(f * 50))
}
self.audioView.active = !silent
self.audioView.setNeedsDisplay()
}
}
}
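For completeness, the tap only delivers buffers once the engine is running, and vDSP_meamgv comes from the Accelerate framework. Here is a sketch of the start-up this snippet assumes (my reading of the article, not quoted from it):
// Assumed start-up after installing the tap (vDSP_meamgv above needs `import Accelerate`):
self.audioEngine.prepare()
do {
    try self.audioEngine.start()
} catch {
    print("Could not start the audio engine: \(error)")
}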
Here is the article I wrote, and I hope that you will find what you are looking for:
https://medium.com/flawless-app-stories/how-i-created-apples-voice-memos-clone-b6cd6d65f580
The project is also available on GitHub:
https://github.com/HassanElDesouky/VoiceMemosClone
Please note that I'm still a beginner, and I'm sorry my code doesn't seem that clean!

Resources