Find the Bounding Box of a Submesh in iOS

I am drawing a 3D model, loaded as below from an .obj file, and I need to find the bounding box for each submesh of that file.
let assetURL = Bundle.main.url(forResource: "train", withExtension: "obj")!
let allocator = MTKMeshBufferAllocator(device: device)
let asset = MDLAsset(url: assetURL, vertexDescriptor: vertexDescriptor, bufferAllocator: allocator)
let mdlMesh = asset.object(at: 0) as! MDLMesh
mesh = try! MTKMesh(mesh: mdlMesh, device: device)
for submesh in mesh.submeshes {
//I need to find the bounding box for each Submesh
}
How can I achieve that in iOS?

Here's a function that takes an MTKMesh and produces an array of MDLAxisAlignedBoundingBoxes in model space:
enum BoundingBoxError : Error {
case invalidIndexType(String)
}
func boundingBoxesForSubmeshes(of mtkMesh: MTKMesh, positionAttributeData: MDLVertexAttributeData) throws -> [MDLAxisAlignedBoundingBox]
{
struct VertexPosition {
var x, y, z: Float
}
var boundingBoxes = [MDLAxisAlignedBoundingBox]()
let positionsPtr = positionAttributeData.dataStart
for submesh in mtkMesh.submeshes {
// Reset the running bounds for each submesh so a box doesn't accumulate earlier submeshes
var minX = Float.greatestFiniteMagnitude
var minY = Float.greatestFiniteMagnitude
var minZ = Float.greatestFiniteMagnitude
var maxX = -Float.greatestFiniteMagnitude
var maxY = -Float.greatestFiniteMagnitude
var maxZ = -Float.greatestFiniteMagnitude
let indexBuffer = submesh.indexBuffer
let mtlIndexBuffer = indexBuffer.buffer
let submeshIndicesRaw = mtlIndexBuffer.contents().advanced(by: indexBuffer.offset)
if submesh.indexType != .uint32 {
throw BoundingBoxError.invalidIndexType("Expected 32-bit indices")
}
let submeshIndicesPtr = submeshIndicesRaw.bindMemory(to: UInt32.self,
capacity: submesh.indexCount)
let submeshIndices = UnsafeMutableBufferPointer<UInt32>(start: submeshIndicesPtr,
count:submesh.indexCount)
for index in submeshIndices {
let positionPtr = positionsPtr.advanced(by: Int(index) * positionAttributeData.stride)
let position = positionPtr.assumingMemoryBound(to: VertexPosition.self).pointee
if position.x < minX { minX = position.x }
if position.y < minY { minY = position.y }
if position.z < minZ { minZ = position.z }
if position.x > maxX { maxX = position.x }
if position.y > maxY { maxY = position.y }
if position.z > maxZ { maxZ = position.z }
}
let min = SIMD3<Float>(x: minX, y: minY, z: minZ)
let max = SIMD3<Float>(x: maxX, y: maxY, z: maxZ)
let box = MDLAxisAlignedBoundingBox(maxBounds: max, minBounds: min)
boundingBoxes.append(box)
}
return boundingBoxes
}
Note that this function expects the submesh to have 32-bit indices, though it could be adapted to support 16-bit indices as well.
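For instance, a 16-bit branch could look roughly like this (an untested sketch that reuses submeshIndicesRaw and the position lookup from the function above):
if submesh.indexType == .uint16 {
    // Widen each 16-bit index, then do the same position lookup and min/max comparisons
    let indices16 = submeshIndicesRaw.bindMemory(to: UInt16.self, capacity: submesh.indexCount)
    for i in 0..<submesh.indexCount {
        let index = UInt32(indices16[i])
        // ...same body as the 32-bit loop above, using `index`...
    }
}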
To drive it, you'll also need to supply an MDLVertexAttributeData object that points to the vertex data in the containing mesh. Here's how to do that:
let mesh = try! MTKMesh(mesh: mdlMesh, device: device)
let positionAttrData = mdlMesh.vertexAttributeData(forAttributeNamed: MDLVertexAttributePosition, as: .float3)!
let boundingBoxes = try? boundingBoxesForSubmeshes(of: mesh, positionAttributeData: positionAttrData)
I haven't tested this code thoroughly, but it seems to produce sane results on my limited set of test cases. Visualizing the boxes with a debug mesh should immediately show whether or not they're correct.
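For example, generating that debug geometry might look roughly like this (an untested sketch; it assumes the device and allocator from the question, and builds one box mesh per bounding box that you then translate by the box's center when drawing):
for box in boundingBoxes ?? [] {
    let extent = box.maxBounds - box.minBounds
    let center = (box.maxBounds + box.minBounds) * 0.5
    let boxMesh = MDLMesh(boxWithExtent: extent,
                          segments: [1, 1, 1],
                          inwardNormals: false,
                          geometryType: .triangles,
                          allocator: allocator)
    let debugMesh = try? MTKMesh(mesh: boxMesh, device: device)
    // Draw debugMesh translated by `center`; setting the encoder's triangleFillMode to .lines
    // gives a wireframe look that makes misplaced boxes obvious.
}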

Related

How to show a 3D pie chart in Swift 4 in my app

I am trying to show a 3D pie chart in my app in Swift 4, but I can't find any solution. Can anyone help me or give me a suggestion, please?
private func createPieChart() -> SCNNode {
let aNode = SCNNode()
var total:Float = 0.0
//let numRows = data.numberOfRows()
let numColumns = data.numberOfColums()
let i = 0
for j in 0 ..< numColumns {
let val = data.valueForIndexPath(row:i, column: j)
total = total + val
}
var startDeg:Float = 0.0
var startRad:Float = 0.0
for j in 0 ..< numColumns {
let val = data.valueForIndexPath(row:i, column: j)
let pct = val*360.0/total
startRad = Float(startDeg) * Float(M_PI) / Float(180.0)
let endDeg = startDeg + pct - 1.0
let endRad:Float = Float(endDeg) * Float(M_PI) / Float(180.0)
let circlePath = UIBezierPath()
circlePath.move(to: CGPoint.zero)
circlePath.addArc(withCenter: CGPoint.zero, radius: 20.0, startAngle:CGFloat(startRad), endAngle: CGFloat(endRad), clockwise: true)
startDeg = endDeg + 1
let node = SCNNode(geometry: SCNShape(path:circlePath, extrusionDepth: 5.0))
node.geometry?.firstMaterial?.diffuse.contents = data.colorForIndexPath(row: i, column: j)
aNode.addChildNode(node)
}
return aNode
}
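To actually see the chart, a minimal setup might look like this (a sketch; it assumes an SCNView outlet named scnView, which isn't shown in the question):
let scene = SCNScene()
let pieNode = createPieChart()
pieNode.position = SCNVector3(x: 0, y: 0, z: -50) // push the 20-point-radius chart back so it fits in view
scene.rootNode.addChildNode(pieNode)
scnView.scene = scene
scnView.autoenablesDefaultLighting = true // so the extruded slices are shaded
scnView.allowsCameraControl = true        // orbit the chart to see the 3D extrusion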

Absolute depth from iPhone X back camera using disparity from AVDepthData?

I'm trying to estimate the absolute depth (in meters) from an AVDepthData object based on this equation: depth = baseline x focal_length / (disparity + d_offset). I have all the parameters from cameraCalibrationData, but does this still apply to an image taken in Portrait mode with the iPhone X, since the two cameras are offset vertically? Also, based on WWDC 2017 Session 507 the disparity map is relative, but the AVDepthData documentation states that the disparity values are in 1/m. So can I apply the equation to the values in the depth data as is, or do I need to do some additional processing beforehand?
var depthData: AVDepthData
do {
depthData = try AVDepthData(fromDictionaryRepresentation: auxDataInfo)
} catch {
return nil
}
// Working with disparity
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32 {
depthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
}
CVPixelBufferLockBaseAddress(depthData.depthDataMap, CVPixelBufferLockFlags(rawValue: 0))
// Scale Intrinsic matrix to be in depth image pixel space
guard var intrinsicMatrix = depthData.cameraCalibrationData?.intrinsicMatrix else{ return nil}
guard let referenceDimensions = depthData.cameraCalibrationData?.intrinsicMatrixReferenceDimensions else { return nil }
let depthWidth = CVPixelBufferGetWidth(depthData.depthDataMap)
let depthHeight = CVPixelBufferGetHeight(depthData.depthDataMap)
let depthSize = CGSize(width: depthWidth, height: depthHeight)
let ratio: Float = Float(referenceDimensions.width) / Float(depthWidth)
intrinsicMatrix[0][0] /= ratio;
intrinsicMatrix[1][1] /= ratio;
intrinsicMatrix[2][0] /= ratio;
intrinsicMatrix[2][1] /= ratio;
// For converting disparity to depth
let baseline: Float = 1.45/100.0 // measured baseline in m
// Prepare for lens distortion correction
let lut = depthData.cameraCalibrationData?.lensDistortionLookupTable
let center = depthData.cameraCalibrationData?.lensDistortionCenter
let centerX: CGFloat = center!.x / CGFloat(ratio)
let centerY: CGFloat = center!.y / CGFloat(ratio)
let correctedCenter = CGPoint(x: centerX, y: centerY);
// Build point cloud
var pointCloud = Array<Any>()
for dataY in 0 ..< depthHeight{
let rowData = CVPixelBufferGetBaseAddress(depthData.depthDataMap)! + dataY * CVPixelBufferGetBytesPerRow(depthData.depthDataMap)
let data = UnsafeBufferPointer(start: rowData.assumingMemoryBound(to: Float32.self), count: depthWidth)
for dataX in 0 ..< depthWidth{
let dispZ = data[dataX]
let pointZ = baseline * intrinsicMatrix[0][0] / dispZ
let currPoint: CGPoint = CGPoint(x: dataX,y: dataY)
let correctedPoint: CGPoint = lensDistortionPoint(for: currPoint, lookupTable: lut!, distortionOpticalCenter: correctedCenter,imageSize: depthSize)
let pointX = (Float(correctedPoint.x) - intrinsicMatrix[2][0]) * pointZ / intrinsicMatrix[0][0];
let pointY = (Float(correctedPoint.y) - intrinsicMatrix[2][1]) * pointZ / intrinsicMatrix[1][1];
pointCloud.append([pointX,pointY,pointZ])
}
}
CVPixelBufferUnlockBaseAddress(depthData.depthDataMap, CVPixelBufferLockFlags(rawValue: 0))

Copy a CVPixelBuffer on any iOS device

I'm having a great deal of difficulty coming up with code that reliably copies a CVPixelBuffer on any iOS device. My first attempt worked fine until I tried it on an iPad Pro:
extension CVPixelBuffer {
func deepcopy() -> CVPixelBuffer? {
let width = CVPixelBufferGetWidth(self)
let height = CVPixelBufferGetHeight(self)
let format = CVPixelBufferGetPixelFormatType(self)
var pixelBufferCopyOptional:CVPixelBuffer?
CVPixelBufferCreate(nil, width, height, format, nil, &pixelBufferCopyOptional)
if let pixelBufferCopy = pixelBufferCopyOptional {
CVPixelBufferLockBaseAddress(self, kCVPixelBufferLock_ReadOnly)
CVPixelBufferLockBaseAddress(pixelBufferCopy, 0)
let baseAddress = CVPixelBufferGetBaseAddress(self)
let dataSize = CVPixelBufferGetDataSize(self)
let target = CVPixelBufferGetBaseAddress(pixelBufferCopy)
memcpy(target, baseAddress, dataSize)
CVPixelBufferUnlockBaseAddress(pixelBufferCopy, 0)
CVPixelBufferUnlockBaseAddress(self, kCVPixelBufferLock_ReadOnly)
}
return pixelBufferCopyOptional
}
}
The above crashes on an iPad Pro because CVPixelBufferGetDataSize(self) is slightly larger than CVPixelBufferGetDataSize(pixelBufferCopy), so the memcpy writes to unallocated memory.
So I gave up with that and tried this:
func copy() -> CVPixelBuffer?
{
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
var _copy: CVPixelBuffer?
CVPixelBufferCreate(
nil,
CVPixelBufferGetWidth(self),
CVPixelBufferGetHeight(self),
CVPixelBufferGetPixelFormatType(self),
CVBufferGetAttachments(self, .shouldPropagate),
&_copy)
guard let copy = _copy else { return nil }
CVPixelBufferLockBaseAddress(self, .readOnly)
CVPixelBufferLockBaseAddress(copy, [])
defer
{
CVPixelBufferUnlockBaseAddress(copy, [])
CVPixelBufferUnlockBaseAddress(self, .readOnly)
}
for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
{
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
memcpy(dest, source, height * bytesPerRow)
}
return copy
}
That works on both my test devices, but it's just reached actual customers and it turns out it crashes on the iPad 6 (and only that device so far). It's an EXC_BAD_ACCESS on the call to memcpy() again.
Seems crazy that there isn't a simple API call for this given how hard it seems to be to make it work reliably. Or am I making it harder than it needs to be? Thanks for any advice!
This question and answer combo is solid gold. Let me add value with a slight refactor and some control flow to account for CVPixelBuffers that do not have planes.
public enum PixelBufferCopyError: Error {
case allocationFailed
}
public extension CVPixelBuffer {
func copy() throws -> CVPixelBuffer {
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "copy() cannot be called on a non-CVPixelBuffer")
var _copy: CVPixelBuffer?
let width = CVPixelBufferGetWidth(self)
let height = CVPixelBufferGetHeight(self)
let formatType = CVPixelBufferGetPixelFormatType(self)
let attachments = CVBufferGetAttachments(self, .shouldPropagate)
CVPixelBufferCreate(nil, width, height, formatType, attachments, &_copy)
guard let copy = _copy else {
throw PixelBufferCopyError.allocationFailed
}
CVPixelBufferLockBaseAddress(self, .readOnly)
CVPixelBufferLockBaseAddress(copy, [])
defer {
CVPixelBufferUnlockBaseAddress(copy, [])
CVPixelBufferUnlockBaseAddress(self, .readOnly)
}
let pixelBufferPlaneCount: Int = CVPixelBufferGetPlaneCount(self)
if pixelBufferPlaneCount == 0 {
let dest = CVPixelBufferGetBaseAddress(copy)
let source = CVPixelBufferGetBaseAddress(self)
let height = CVPixelBufferGetHeight(self)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRow(self)
let bytesPerRowDest = CVPixelBufferGetBytesPerRow(copy)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
}else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}else {
for plane in 0 ..< pixelBufferPlaneCount {
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
}else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}
}
return copy
}
}
Valid with Swift 5. To provide a little more background: the AVCaptureVideoDataOutput.videoSettings property accepts many pixel formats, and not all of them have planes, especially the ones ML models might need.
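As a quick usage sketch (it assumes the usual AVCaptureVideoDataOutputSampleBufferDelegate callback, which isn't part of the answer above):
// Deep-copy each camera frame so it can outlive the capture output's small buffer pool
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    do {
        let copiedBuffer = try pixelBuffer.copy()
        // Hand copiedBuffer to slower work (e.g. an ML model) without holding up the camera queue
        print("Copied a \(CVPixelBufferGetWidth(copiedBuffer))x\(CVPixelBufferGetHeight(copiedBuffer)) buffer")
    } catch {
        print("Pixel buffer copy failed: \(error)")
    }
}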
The second implementation looks quite solid. The only problem I can imagine is that a plane in the new pixel buffer is allocated with a different stride length (bytes per row). The stride length is based on width × (bytes per pixel) and then rounded up in an unspecified way to achieve optimal memory access.
So check if:
CVPixelBufferGetBytesPerRowOfPlane(self, plane) == CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
If not, copy the pixel plane row by row:
for plane in 0 ..< CVPixelBufferGetPlaneCount(self)
{
let dest = CVPixelBufferGetBaseAddressOfPlane(copy, plane)
let source = CVPixelBufferGetBaseAddressOfPlane(self, plane)
let height = CVPixelBufferGetHeightOfPlane(self, plane)
let bytesPerRowSrc = CVPixelBufferGetBytesPerRowOfPlane(self, plane)
let bytesPerRowDest = CVPixelBufferGetBytesPerRowOfPlane(copy, plane)
if bytesPerRowSrc == bytesPerRowDest {
memcpy(dest, source, height * bytesPerRowSrc)
} else {
var startOfRowSrc = source
var startOfRowDest = dest
for _ in 0..<height {
memcpy(startOfRowDest, startOfRowSrc, min(bytesPerRowSrc, bytesPerRowDest))
startOfRowSrc = startOfRowSrc?.advanced(by: bytesPerRowSrc)
startOfRowDest = startOfRowDest?.advanced(by: bytesPerRowDest)
}
}
}

How to draw an arc on Google Maps in iOS?

How can I draw an arc between two coordinate points in Google Maps in iOS, like in this image and similar to what a Facebook post shows?
Before using the function below, don't forget to import GoogleMaps.
Credits: xomena
func drawArcPolyline(startLocation: CLLocationCoordinate2D?, endLocation: CLLocationCoordinate2D?) {
if let startLocation = startLocation, let endLocation = endLocation {
//swap the startLocation & endLocation if you want to reverse the direction of polyline arc formed.
let mapView = GMSMapView() // NOTE: use your existing GMSMapView instance here; a map view created inside this function never appears on screen
let path = GMSMutablePath()
path.add(startLocation)
path.add(endLocation)
// Curve Line
let k: Double = 0.2 //try between 0.5 to 0.2 for better results that suits you
let d = GMSGeometryDistance(startLocation, endLocation)
let h = GMSGeometryHeading(startLocation, endLocation)
//Midpoint position
let p = GMSGeometryOffset(startLocation, d * 0.5, h)
//Apply some mathematics to calculate position of the circle center
let x = (1 - k * k) * d * 0.5 / (2 * k)
let r = (1 + k * k) * d * 0.5 / (2 * k)
let c = GMSGeometryOffset(p, x, h + 90.0)
//Polyline options
//Calculate heading between circle center and two points
let h1 = GMSGeometryHeading(c, startLocation)
let h2 = GMSGeometryHeading(c, endLocation)
//Calculate positions of points on circle border and add them to polyline options
let numpoints = 100.0
let step = ((h2 - h1) / Double(numpoints))
for i in stride(from: 0.0, to: numpoints, by: 1) {
let pi = GMSGeometryOffset(c, r, h1 + i * step)
path.add(pi)
}
//Draw polyline
let polyline = GMSPolyline(path: path)
polyline.map = mapView // Assign GMSMapView as map
polyline.strokeWidth = 3.0
let styles = [GMSStrokeStyle.solidColor(UIColor.black), GMSStrokeStyle.solidColor(UIColor.clear)]
let lengths = [20, 20] // Play with this for dotted line
polyline.spans = GMSStyleSpans(polyline.path!, styles, lengths as [NSNumber], .rhumb)
let bounds = GMSCoordinateBounds(coordinate: startLocation, coordinate: endLocation)
let insets = UIEdgeInsets(top: 20, left: 20, bottom: 20, right: 20)
let camera = mapView.camera(for: bounds, insets: insets)!
mapView.animate(to: camera)
}
}
I used the quadratic Bézier equation to draw curved lines. You can have a look at the implementation. Here is the sample code.
func bezierPath(from startLocation: CLLocationCoordinate2D, to endLocation: CLLocationCoordinate2D) -> GMSMutablePath {
let distance = GMSGeometryDistance(startLocation, endLocation)
let midPoint = GMSGeometryInterpolate(startLocation, endLocation, 0.5)
let midToStartLocHeading = GMSGeometryHeading(midPoint, startLocation)
let controlPointAngle = 360.0 - (90.0 - midToStartLocHeading)
let controlPoint = GMSGeometryOffset(midPoint, distance / 2.0 , controlPointAngle)
let path = GMSMutablePath()
let stepper = 0.05
let range = stride(from: 0.0, through: 1.0, by: stepper)// t = [0,1]
func calculatePoint(when t: Double) -> CLLocationCoordinate2D {
let t1 = (1.0 - t)
let latitude = t1 * t1 * startLocation.latitude + 2 * t1 * t * controlPoint.latitude + t * t * endLocation.latitude
let longitude = t1 * t1 * startLocation.longitude + 2 * t1 * t * controlPoint.longitude + t * t * endLocation.longitude
let point = CLLocationCoordinate2D(latitude: latitude, longitude: longitude)
return point
}
range.map { calculatePoint(when: $0) }.forEach { path.add($0) }
return path
}
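For reference, the point computed inside calculatePoint is the quadratic Bézier curve B(t) = (1 − t)²·start + 2(1 − t)·t·control + t²·end, sampled for t from 0 to 1 in steps of 0.05; the arc bends toward the control point offset from the midpoint.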
None of the answers mentioned is a foolproof solution: for a few locations they draw a circle instead of an arc.
To resolve this, we calculate the bearing (degrees clockwise from true north) and, if it is less than zero, swap the start and end locations.
func createArc(
startLocation: CLLocationCoordinate2D,
endLocation: CLLocationCoordinate2D) -> GMSPolyline {
var start = startLocation
var end = endLocation
if start.bearing(to: end) < 0.0 {
start = endLocation
end = startLocation
}
let angle = start.bearing(to: end) * Double.pi / 180.0
let k = abs(0.3 * sin(angle))
let path = GMSMutablePath()
let d = GMSGeometryDistance(start, end)
let h = GMSGeometryHeading(start, end)
let p = GMSGeometryOffset(start, d * 0.5, h)
let x = (1 - k * k) * d * 0.5 / (2 * k)
let r = (1 + k * k) * d * 0.5 / (2 * k)
let c = GMSGeometryOffset(p, x, h + 90.0)
var h1 = GMSGeometryHeading(c, start)
var h2 = GMSGeometryHeading(c, end)
if (h1 > 180) {
h1 = h1 - 360
}
if (h2 > 180) {
h2 = h2 - 360
}
let numpoints = 100.0
let step = ((h2 - h1) / Double(numpoints))
for i in stride(from: 0.0, to: numpoints, by: 1) {
let pi = GMSGeometryOffset(c, r, h1 + i * step)
path.add(pi)
}
path.add(end)
let polyline = GMSPolyline(path: path)
polyline.strokeWidth = 3.0
polyline.spans = GMSStyleSpans(
polyline.path!,
[GMSStrokeStyle.solidColor(UIColor(hex: "#2962ff"))],
[20, 20], .rhumb
)
return polyline
}
The bearing is the direction in which a vertical line on the map points, measured in degrees clockwise from north.
extension CLLocationCoordinate2D {
func bearing(to point: CLLocationCoordinate2D) -> Double {
func degreesToRadians(_ degrees: Double) -> Double { return degrees * Double.pi / 180.0 }
func radiansToDegrees(_ radians: Double) -> Double { return radians * 180.0 / Double.pi }
let lat1 = degreesToRadians(latitude)
let lon1 = degreesToRadians(longitude)
let lat2 = degreesToRadians(point.latitude);
let lon2 = degreesToRadians(point.longitude);
let dLon = lon2 - lon1;
let y = sin(dLon) * cos(lat2);
let x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dLon);
let radiansBearing = atan2(y, x);
return radiansToDegrees(radiansBearing)
}
}
The answer above does not handle all the corner cases; here is one that draws the arcs nicely:
func drawArcPolyline(startLocation: CLLocationCoordinate2D?, endLocation: CLLocationCoordinate2D?) {
if let _ = startLocation, let _ = endLocation {
//swap the startLocation & endLocation if you want to reverse the direction of polyline arc formed.
var start = startLocation!
var end = endLocation!
var gradientColors = GMSStrokeStyle.gradient(
from: UIColor(red: 11.0/255, green: 211.0/255, blue: 200.0/255, alpha: 1),
to: UIColor(red: 0/255, green: 44.0/255, blue: 66.0/255, alpha: 1))
if startLocation!.heading(to: endLocation!) < 0.0 {
// need to reverse the start and end, and reverse the color
start = endLocation!
end = startLocation!
gradientColors = GMSStrokeStyle.gradient(
from: UIColor(red: 0/255, green: 44.0/255, blue: 66.0/255, alpha: 1),
to: UIColor(red: 11.0/255, green: 211.0/255, blue: 200.0/255, alpha: 1))
}
let path = GMSMutablePath()
// Curve Line
let k = abs(0.3 * sin((start.heading(to: end)).degreesToRadians)) // was 0.3
let d = GMSGeometryDistance(start, end)
let h = GMSGeometryHeading(start, end)
//Midpoint position
let p = GMSGeometryOffset(start, d * 0.5, h)
//Apply some mathematics to calculate position of the circle center
let x = (1-k*k)*d*0.5/(2*k);
let r = (1+k*k)*d*0.5/(2*k);
let c = GMSGeometryOffset(p, x, h + 90.0)
//Polyline options
//Calculate heading between circle center and two points
var h1 = GMSGeometryHeading(c, start)
var h2 = GMSGeometryHeading(c, end)
if(h1>180){
h1 = h1 - 360
}
if(h2>180){
h2 = h2 - 360
}
//Calculate positions of points on circle border and add them to polyline options
let numpoints = 100.0
let step = (h2 - h1) / numpoints
for i in stride(from: 0.0, to: numpoints, by: 1) {
let pi = GMSGeometryOffset(c, r, h1 + i * step)
path.add(pi)
}
path.add(end)
//Draw polyline
let polyline = GMSPolyline(path: path)
polyline.map = mapView // Assign GMSMapView as map
polyline.strokeWidth = 5.0
polyline.spans = [GMSStyleSpan(style: gradientColors)]
}
}
Objective-C version of @Rouny's answer
- (void)DrawCurvedPolylineOnMapFrom:(CLLocationCoordinate2D)startLocation To:(CLLocationCoordinate2D)endLocation
{
GMSMutablePath * path = [[GMSMutablePath alloc]init];
[path addCoordinate:startLocation];
[path addCoordinate:endLocation];
// Curve Line
double k = 0.2; //try between 0.5 to 0.2 for better results that suits you
CLLocationDistance d = GMSGeometryDistance(startLocation, endLocation);
float h = GMSGeometryHeading(startLocation , endLocation);
//Midpoint position
CLLocationCoordinate2D p = GMSGeometryOffset(startLocation, d * 0.5, h);
//Apply some mathematics to calculate position of the circle center
float x = (1-k*k)*d*0.5/(2*k);
float r = (1+k*k)*d*0.5/(2*k);
CLLocationCoordinate2D c = GMSGeometryOffset(p, x, h + -90.0);
//Polyline options
//Calculate heading between circle center and two points
float h1 = GMSGeometryHeading(c, startLocation);
float h2 = GMSGeometryHeading(c, endLocation);
//Calculate positions of points on circle border and add them to polyline options
float numpoints = 100;
float step = ((h2 - h1) / numpoints);
for (int i = 0; i < numpoints; i++) {
CLLocationCoordinate2D pi = GMSGeometryOffset(c, r, h1 + i * step);
[path addCoordinate:pi];
}
//Draw polyline
GMSPolyline * polyline = [GMSPolyline polylineWithPath:path];
polyline.map = mapView;
polyline.strokeWidth = 3.0;
NSArray *styles = @[[GMSStrokeStyle solidColor:kBaseColor],
[GMSStrokeStyle solidColor:[UIColor clearColor]]];
NSArray *lengths = @[@5, @5];
polyline.spans = GMSStyleSpans(polyline.path, styles, lengths, kGMSLengthRhumb);
GMSCoordinateBounds * bounds = [[GMSCoordinateBounds alloc]initWithCoordinate:startLocation coordinate:endLocation];
UIEdgeInsets insets = UIEdgeInsetsMake(20, 20, 20, 20);
GMSCameraPosition * camera = [mapView cameraForBounds:bounds insets:insets ];
[mapView animateToCameraPosition:camera];
}
Swift 5+
A very easy and smooth way
//MARK: - Usage
let path = self.bezierPath(from: CLLocationCoordinate2D(latitude: kLatitude, longitude: kLongtitude), to: CLLocationCoordinate2D(latitude: self.restaurantLat, longitude: self.restaurantLong))
let polyline = GMSPolyline(path: path)
polyline.strokeWidth = 5.0
polyline.strokeColor = appClr
polyline.map = self.googleMapView // Google MapView
Simple Function
func bezierPath(from startLocation: CLLocationCoordinate2D, to endLocation: CLLocationCoordinate2D) -> GMSMutablePath {
let distance = GMSGeometryDistance(startLocation, endLocation)
let midPoint = GMSGeometryInterpolate(startLocation, endLocation, 0.5)
let midToStartLocHeading = GMSGeometryHeading(midPoint, startLocation)
let controlPointAngle = 360.0 - (90.0 - midToStartLocHeading)
let controlPoint = GMSGeometryOffset(midPoint, distance / 2.0 , controlPointAngle)
let path = GMSMutablePath()
let stepper = 0.05
let range = stride(from: 0.0, through: 1.0, by: stepper)// t = [0,1]
func calculatePoint(when t: Double) -> CLLocationCoordinate2D {
let t1 = (1.0 - t)
let latitude = t1 * t1 * startLocation.latitude + 2 * t1 * t * controlPoint.latitude + t * t * endLocation.latitude
let longitude = t1 * t1 * startLocation.longitude + 2 * t1 * t * controlPoint.longitude + t * t * endLocation.longitude
let point = CLLocationCoordinate2D(latitude: latitude, longitude: longitude)
return point
}
range.map { calculatePoint(when: $0) }.forEach { path.add($0) }
return path
}

Why is my screen not rendering anything?

I have the following code. When I run it I get a blank screen. This block of code is inside sceneDidLoad(), so it will get executed, but nothing is displayed. Am I missing something?
let worldNode = SKNode()
let height = 100
let width = 100
let regions:[Float: String] = [-0.04: "sand", -0.08: "water", 0.9: "grass"]
let noiseSource = GKPerlinNoiseSource()
let noise: GKNoise = GKNoise(noiseSource: noiseSource)
let noiseMap: GKNoiseMap = GKNoiseMap(noise: noise)
let tileMapNode = SKTileMapNode()
tileMapNode.enableAutomapping = true
tileMapNode.numberOfRows = height
tileMapNode.numberOfColumns = width
worldNode.addChild(tileMapNode)
for y in 0 ... height {
for x in 0 ... width {
let currentHeight = noiseMap.value(atPosition: vector2(Int32(x), Int32(y)));
for (key, value) in regions {
if (currentHeight <= key) {
//colourMap [y * mapChunkSize + x] = regions[i];
let tileSize = CGSize(width: 32.0, height: 32.0)
let tileTexture = SKTexture(imageNamed: value)
let tileDef = SKTileDefinition(texture: tileTexture, size: tileSize)
let tileGroup = SKTileGroup(tileDefinition: tileDef)
tileMapNode.setTileGroup(tileGroup, forColumn: x, row: y)
print("Tiling: \(value)")
break;
}
}
}
}
self.addChild(worldNode)
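One thing that stands out (this is an assumption, since the rest of the scene setup isn't shown): an SKTileMapNode created with the parameterless initializer has a zero tileSize and no tile set, so tile groups assigned to it may render nothing. A rough sketch of configuring the map node up front, reusing the width, height, and texture names from the code above:
let tileSize = CGSize(width: 32, height: 32)
let tileGroups = ["sand", "water", "grass"].map { name -> SKTileGroup in
    let definition = SKTileDefinition(texture: SKTexture(imageNamed: name), size: tileSize)
    let group = SKTileGroup(tileDefinition: definition)
    group.name = name
    return group
}
let tileSet = SKTileSet(tileGroups: tileGroups)
// Give the map an explicit tile set and a non-zero tile size before assigning tile groups
let tileMapNode = SKTileMapNode(tileSet: tileSet, columns: width, rows: height, tileSize: tileSize)
// ...then, inside the loops, look the group up by name instead of creating a new one per tile:
// tileMapNode.setTileGroup(tileGroups.first { $0.name == value }, forColumn: x, row: y)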
