Here is my code.
var frameCount = INT_MAX
...
let totalSize: UInt32 = 4096
let itemSize: UInt32 = 64
frameCount = totalSize / itemSize
I get "Binary operator '/' cannot be applied to two 'UInt32' operands" error message.
Is it really impossible, or what did I miss?
The error message is a bit misleading. INT_MAX is defined as
public var INT_MAX: Int32 { get }
so with
var frameCount = INT_MAX
frameCount is defined as a variable of type Int32.
The result of the division totalSize / itemSize, however, is a UInt32, and Swift does not implicitly convert types.
You can fix that by changing the initial definition to
var frameCount = UINT32_MAX
or perhaps simpler, let the compiler infer the type:
let totalSize: UInt32 = 4096
let itemSize: UInt32 = 64
let frameCount = totalSize / itemSize
If you need the result as a signed integer then you have to
convert it explicitly, e.g.
let frameCount = Int32(totalSize / itemSize)
I'm trying to convert an UnsafePointer<UInt16> to an UnsafePointer<Float>, and so far I ended up with this solution:
let bufferSize = 1024
let buffer: UnsafePointer<UInt16> = ....
let tmp = UnsafeBufferPointer(start: buffer, count: bufferSize).map(Float.init)
let converted: UnsafePointer<Float> = UnsafePointer(tmp)
It works, but I have the feeling it's not an efficient way, since I'm creating an intermediate Array... Is there a better way to do that?
You can use withMemoryRebound to convert a pointer from one type to another:
buffer.withMemoryRebound(to: Float.self, capacity: 1024) { converted -> Void in
    // use `converted` here
}
But be careful: MemoryLayout<Float>.size is 4 (i.e. 32 bits) and MemoryLayout<UInt16>.size is 2 (i.e. 16 bits), so the capacity of your Float buffer will be half that of your UInt16 buffer.
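As an illustration (my own sketch, not part of the original answer), you can derive the rebound capacity from the element strides rather than hard-coding it; the source buffer is set up locally here so the snippet is self-contained:
// Hypothetical setup standing in for the question's `buffer` of 1024 UInt16 values
let uint16Count = 1024
let storage = UnsafeMutablePointer<UInt16>.allocate(capacity: uint16Count)
storage.initialize(repeating: 0, count: uint16Count)
defer { storage.deallocate() }
let buffer = UnsafePointer(storage)

// Float (4 bytes) is twice as wide as UInt16 (2 bytes), so the same bytes hold half as many Floats
let floatCount = uint16Count * MemoryLayout<UInt16>.stride / MemoryLayout<Float>.stride

buffer.withMemoryRebound(to: Float.self, capacity: floatCount) { converted in
    // `converted` views the same memory as `floatCount` Float values
    // (a bit-level reinterpretation, not a numeric UInt16 -> Float conversion)
    let floats = UnsafeBufferPointer(start: converted, count: floatCount)
    print(floats.count) // 512
}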
Based on @Kametrixom's answer, I have made a test application for parallel calculation of the sum of an array.
My test application looks like this:
import UIKit
import Metal
class ViewController: UIViewController {
    // Data type, has to be the same as in the shader
    typealias DataType = CInt

    override func viewDidLoad() {
        super.viewDidLoad()

        let data = (0..<10000000).map { _ in DataType(200) } // Our test data: 10,000,000 elements, all set to 200
        var start, end: UInt64
        var result: DataType = 0

        start = mach_absolute_time()
        data.withUnsafeBufferPointer { buffer in
            for elem in buffer {
                result += elem
            }
        }
        end = mach_absolute_time()
        print("CPU result: \(result), time: \(Double(end - start) / Double(NSEC_PER_SEC))")

        result = 0
        start = mach_absolute_time()
        result = sumParallel4(data)
        end = mach_absolute_time()
        print("Metal result: \(result), time: \(Double(end - start) / Double(NSEC_PER_SEC))")

        result = 0
        start = mach_absolute_time()
        result = sumParralel(data)
        end = mach_absolute_time()
        print("Metal result: \(result), time: \(Double(end - start) / Double(NSEC_PER_SEC))")

        result = 0
        start = mach_absolute_time()
        result = sumParallel3(data)
        end = mach_absolute_time()
        print("Metal result: \(result), time: \(Double(end - start) / Double(NSEC_PER_SEC))")
    }
    func sumParralel(data: Array<DataType>) -> DataType {
        let count = data.count
        let elementsPerSum: Int = Int(sqrt(Double(count)))

        let device = MTLCreateSystemDefaultDevice()!
        let parsum = device.newDefaultLibrary()!.newFunctionWithName("parsum")!
        let pipeline = try! device.newComputePipelineStateWithFunction(parsum)

        var dataCount = CUnsignedInt(count)
        var elementsPerSumC = CUnsignedInt(elementsPerSum)
        let resultsCount = (count + elementsPerSum - 1) / elementsPerSum // Number of individual results = count / elementsPerSum (rounded up)

        let dataBuffer = device.newBufferWithBytes(data, length: strideof(DataType) * count, options: []) // Our data in a buffer (copied)
        let resultsBuffer = device.newBufferWithLength(strideof(DataType) * resultsCount, options: []) // A buffer for individual results (zero initialized)
        let results = UnsafeBufferPointer<DataType>(start: UnsafePointer(resultsBuffer.contents()), count: resultsCount) // Our results in convenient form to compute the actual result later

        let queue = device.newCommandQueue()
        let cmds = queue.commandBuffer()
        let encoder = cmds.computeCommandEncoder()

        encoder.setComputePipelineState(pipeline)
        encoder.setBuffer(dataBuffer, offset: 0, atIndex: 0)
        encoder.setBytes(&dataCount, length: sizeofValue(dataCount), atIndex: 1)
        encoder.setBuffer(resultsBuffer, offset: 0, atIndex: 2)
        encoder.setBytes(&elementsPerSumC, length: sizeofValue(elementsPerSumC), atIndex: 3)

        // We have to calculate the sum `resultCount` times => amount of threadgroups is `resultsCount` / `threadExecutionWidth` (rounded up) because each threadgroup will process `threadExecutionWidth` threads
        let threadgroupsPerGrid = MTLSize(width: (resultsCount + pipeline.threadExecutionWidth - 1) / pipeline.threadExecutionWidth, height: 1, depth: 1)
        // Here we set that each threadgroup should process `threadExecutionWidth` threads, the only important thing for performance is that this number is a multiple of `threadExecutionWidth` (here 1 times)
        let threadsPerThreadgroup = MTLSize(width: pipeline.threadExecutionWidth, height: 1, depth: 1)

        encoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
        encoder.endEncoding()

        var result: DataType = 0

        cmds.commit()
        cmds.waitUntilCompleted()

        for elem in results {
            result += elem
        }

        return result
    }
    func sumParralel1(data: Array<DataType>) -> UnsafeBufferPointer<DataType> {
        let count = data.count
        let elementsPerSum: Int = Int(sqrt(Double(count)))

        let device = MTLCreateSystemDefaultDevice()!
        let parsum = device.newDefaultLibrary()!.newFunctionWithName("parsum")!
        let pipeline = try! device.newComputePipelineStateWithFunction(parsum)

        var dataCount = CUnsignedInt(count)
        var elementsPerSumC = CUnsignedInt(elementsPerSum)
        let resultsCount = (count + elementsPerSum - 1) / elementsPerSum // Number of individual results = count / elementsPerSum (rounded up)

        let dataBuffer = device.newBufferWithBytes(data, length: strideof(DataType) * count, options: []) // Our data in a buffer (copied)
        let resultsBuffer = device.newBufferWithLength(strideof(DataType) * resultsCount, options: []) // A buffer for individual results (zero initialized)
        let results = UnsafeBufferPointer<DataType>(start: UnsafePointer(resultsBuffer.contents()), count: resultsCount) // Our results in convenient form to compute the actual result later

        let queue = device.newCommandQueue()
        let cmds = queue.commandBuffer()
        let encoder = cmds.computeCommandEncoder()

        encoder.setComputePipelineState(pipeline)
        encoder.setBuffer(dataBuffer, offset: 0, atIndex: 0)
        encoder.setBytes(&dataCount, length: sizeofValue(dataCount), atIndex: 1)
        encoder.setBuffer(resultsBuffer, offset: 0, atIndex: 2)
        encoder.setBytes(&elementsPerSumC, length: sizeofValue(elementsPerSumC), atIndex: 3)

        // We have to calculate the sum `resultCount` times => amount of threadgroups is `resultsCount` / `threadExecutionWidth` (rounded up) because each threadgroup will process `threadExecutionWidth` threads
        let threadgroupsPerGrid = MTLSize(width: (resultsCount + pipeline.threadExecutionWidth - 1) / pipeline.threadExecutionWidth, height: 1, depth: 1)
        // Here we set that each threadgroup should process `threadExecutionWidth` threads, the only important thing for performance is that this number is a multiple of `threadExecutionWidth` (here 1 times)
        let threadsPerThreadgroup = MTLSize(width: pipeline.threadExecutionWidth, height: 1, depth: 1)

        encoder.dispatchThreadgroups(threadgroupsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
        encoder.endEncoding()

        cmds.commit()
        cmds.waitUntilCompleted()

        return results
    }
    func sumParallel3(data: Array<DataType>) -> DataType {
        var results = sumParralel1(data)

        repeat {
            results = sumParralel1(Array(results))
        } while results.count >= 100

        var result: DataType = 0

        for elem in results {
            result += elem
        }

        return result
    }
    func sumParallel4(data: Array<DataType>) -> DataType {
        let queue = NSOperationQueue()
        queue.maxConcurrentOperationCount = 4

        var a0: DataType = 0
        var a1: DataType = 0
        var a2: DataType = 0
        var a3: DataType = 0

        let op0 = NSBlockOperation(block: {
            for i in 0..<(data.count / 4) {
                a0 = a0 + data[i]
            }
        })

        let op1 = NSBlockOperation(block: {
            for i in (data.count / 4)..<(data.count / 2) {
                a1 = a1 + data[i]
            }
        })

        let op2 = NSBlockOperation(block: {
            for i in (data.count / 2)..<(3 * data.count / 4) {
                a2 = a2 + data[i]
            }
        })

        let op3 = NSBlockOperation(block: {
            for i in (3 * data.count / 4)..<(data.count) {
                a3 = a3 + data[i]
            }
        })

        queue.addOperation(op0)
        queue.addOperation(op1)
        queue.addOperation(op2)
        queue.addOperation(op3)

        queue.suspended = false
        queue.waitUntilAllOperationsAreFinished()

        let aaa: DataType = a0 + a1 + a2 + a3
        return aaa
    }
}
And I have a shader that looks like this:
kernel void parsum(const device DataType* data [[ buffer(0) ]],
                   const device uint& dataLength [[ buffer(1) ]],
                   device DataType* sums [[ buffer(2) ]],
                   const device uint& elementsPerSum [[ buffer(3) ]],
                   const uint tgPos [[ threadgroup_position_in_grid ]],
                   const uint tPerTg [[ threads_per_threadgroup ]],
                   const uint tPos [[ thread_position_in_threadgroup ]]) {
    uint resultIndex = tgPos * tPerTg + tPos; // This is the index of the individual result, this var is unique to this thread
    uint dataIndex = resultIndex * elementsPerSum; // Where the summation should begin
    uint endIndex = dataIndex + elementsPerSum < dataLength ? dataIndex + elementsPerSum : dataLength; // The index where summation should end

    for (; dataIndex < endIndex; dataIndex++)
        sums[resultIndex] += data[dataIndex];
}
To my surprise, the function sumParallel4 is the fastest, which I did not expect. I also noticed that when I call sumParralel and sumParallel3, whichever function is called first is always slower, even if I swap the order. (So if I call sumParralel first, it is slower; if I call sumParallel3 first, it is slower.)
Why is this? Why is sumParallel3 not a lot faster than sumParralel? Why is sumParallel4 the fastest, although it is calculated on the CPU?
How can I update my GPU function to use posix_memalign? I know it should work faster because it would share memory between GPU and CPU, but I don't know which array should be allocated this way (data or result), and how can I allocate data with posix_memalign if data is a parameter passed into the function?
In running these tests on an iPhone 6, I saw the Metal version run between 3x slower and 2x faster than the naive CPU summation. With the modifications I describe below, it was consistently faster.
I found that a lot of the cost in running the Metal version could be attributed not merely to the allocation of the buffers, though that was significant, but also to the first-time creation of the device and compute pipeline state. These are actions you'd normally perform once at application initialization, so it's not entirely fair to include them in the timing.
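For instance, a rough sketch of hoisting that one-time setup into an object created at startup (the class name is mine, and the Swift 2-era API names match the code in the question):
import Metal

// Created once (e.g. at app launch) and reused by every summation call,
// so device/library/pipeline creation stays out of the measured time.
final class MetalSummer {
    let device: MTLDevice
    let queue: MTLCommandQueue
    let pipeline: MTLComputePipelineState

    init() {
        let device = MTLCreateSystemDefaultDevice()!
        let parsum = device.newDefaultLibrary()!.newFunctionWithName("parsum")!
        self.device = device
        self.queue = device.newCommandQueue()
        self.pipeline = try! device.newComputePipelineStateWithFunction(parsum)
    }
}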
It should also be noted that if you're running these tests through Xcode with the Metal validation layer and GPU frame capture enabled, that has a significant run-time cost and will skew the results in the CPU's favor.
With those caveats, here's how you might use posix_memalign to allocate memory that can be used to back a MTLBuffer. The trick is to ensure that the memory you request is in fact page-aligned (i.e. its address is a multiple of getpagesize()), which may entail rounding up the amount of memory beyond how much you actually need to store your data:
let dataCount = 1_000_000
let dataSize = dataCount * strideof(DataType)
let pageSize = Int(getpagesize())
let pageCount = (dataSize + (pageSize - 1)) / pageSize
var dataPointer: UnsafeMutablePointer<Void> = nil
posix_memalign(&dataPointer, pageSize, pageCount * pageSize)
let data = UnsafeMutableBufferPointer(start: UnsafeMutablePointer<DataType>(dataPointer),
                                      count: (pageCount * pageSize) / strideof(DataType))

for i in 0..<dataCount {
    data[i] = 200
}
This does require making data an UnsafeMutableBufferPointer<DataType> rather than a [DataType], since Swift's Array allocates its own backing store. You'll also need to pass along the count of data items to operate on, since the count of the mutable buffer pointer has been rounded up to make the buffer page-aligned.
To actually create a MTLBuffer backed with this data, use the newBufferWithBytesNoCopy(_:length:options:deallocator:) API. It's crucial that, once again, the length you provide is a multiple of the page size; otherwise this method returns nil:
let roundedUpDataSize = strideof(DataType) * data.count
let dataBuffer = device.newBufferWithBytesNoCopy(data.baseAddress, length: roundedUpDataSize, options: [], deallocator: nil)
Here, we don't provide a deallocator, but you should free the memory when you're done using it, by passing the baseAddress of the buffer pointer to free().
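For example (my addition, not code from the answer), the cleanup could look like this once the buffer and the data are no longer in use:
// Release the page-aligned allocation made by posix_memalign.
// (Alternatively, pass a deallocator closure that calls free() when creating
// the MTLBuffer, instead of passing nil.)
free(data.baseAddress)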
I am writing an app in Swift which employs the Scandit barcode scanning SDK. The SDK permits you to access camera frames directly and provides the frame as a CMSampleBuffer. They provide documentation in Objective-C, which I am having trouble getting to work in Swift. I do not know if the problem is in porting the code, or if there is something amiss with the sample buffer itself, perhaps due to a change in Core Media since their documentation was generated.
Their API exposes the frame as follows (Objective-C):
@interface YourViewController () <SBSProcessFrameDelegate>
...
- (void)barcodePicker:(SBSBarcodePicker*)barcodePicker
      didProcessFrame:(CMSampleBufferRef)frame
              session:(SBSScanSession*)session {
    // Process the frame yourself.
}
Building from several answers here on SO, I attempt to process the frame with:
let imageBuffer = CMSampleBufferGetImageBuffer(frame)!
CVPixelBufferLockBaseAddress(imageBuffer, 0)
let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
let width = CVPixelBufferGetWidth(imageBuffer)
let height = CVPixelBufferGetHeight(imageBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue)
let context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo.rawValue)
let quartzImage = CGBitmapContextCreateImage(context)
CVPixelBufferUnlockBaseAddress(imageBuffer,0)
let image = UIImage(CGImage: quartzImage!)
But, this fails with:
Jan 29 09:01:30 Scandit[1308] <Error>: CGBitmapContextCreate: invalid data bytes/row: should be at least 7680 for 8 integer bits/component, 3 components, kCGImageAlphaNoneSkipFirst.
Jan 29 09:01:30 Scandit[1308] <Error>: CGBitmapContextCreateImage: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
fatal error: unexpectedly found nil while unwrapping an Optional value
The fatal error is in attempting to resolve a UIImage from quartzImage.
The width, height, and bytesPerRow are (at the base address):
Width: 1920
Height: 1080
Bytes per row: 2904
As passed from the delegate, here is what the buffer contains according to CMSampleBufferGetFormatDescription(frame):
Optional(<CMVideoFormatDescription 0x1447dafa0 [0x1a1864b68]> {
mediaType:'vide'
mediaSubType:'420f'
mediaSpecific: {
codecType: '420f' dimensions: 1920 x 1080
}
extensions: {<CFBasicHash 0x1447dba10 [0x1a1864b68]>{type = immutable dict, count = 6,
entries =>
0 : <CFString 0x19d28b678 [0x1a1864b68]>{contents = "CVImageBufferYCbCrMatrix"} = <CFString 0x19d28b6b8 [0x1a1864b68]>{contents = "ITU_R_601_4"}
1 : <CFString 0x19d28b7d8 [0x1a1864b68]>{contents = "CVImageBufferTransferFunction"} = <CFString 0x19d28b698 [0x1a1864b68]>{contents = "ITU_R_709_2"}
2 : <CFString 0x19d2b65c0 [0x1a1864b68]>{contents = "CVBytesPerRow"} = <CFNumber 0xb00000000000b582 [0x1a1864b68]>{value = +2904, type = kCFNumberSInt32Type}
3 : <CFString 0x19d2b6640 [0x1a1864b68]>{contents = "Version"} = <CFNumber 0xb000000000000022 [0x1a1864b68]>{value = +2, type = kCFNumberSInt32Type}
5 : <CFString 0x19d28b758 [0x1a1864b68]>{contents = "CVImageBufferColorPrimaries"} = <CFString 0x19d28b698 [0x1a1864b68]>{contents = "ITU_R_709_2"}
6 : <CFString 0x19d28b818 [0x1a1864b68]>{contents = "CVImageBufferChromaLocationTopField"} = <CFString 0x19d28b878 [0x1a1864b68]>{contents = "Center"}
}
}
})
I realize there may be multiple "planes" here, but even querying the per-plane values with:
let pixelBufferBytesPerRow0 = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0)
let pixelBufferBytesPerRow1 = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 1)
gives:
Pixel buffer bytes per row (Plane 0): 1920
Pixel buffer bytes per row (Plane 1): 1920
I don't understand that discrepancy.
I also attempted to process each pixel individually, as it is clear the buffer contains some manner of YCbCr data, but it fails every way I have tried. The Scandit API suggests (Objective-C):
// Get the buffer info for the YCbCrBiPlanar format.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
But, I cannot find a Swift implementation that permits access to the buffer info using CVPlanarPixelBufferInfo... everything I have tried fails, so I am unable to determine the offset for "Y", "Cr", etc.
How can I access the pixel data in the buffer? Is this a problem with the CMSampleBuffer the SDK is passing, a problem with iOS9, or both?
Working from Codo's "hints" and integrating with Objective-C code in the Scandit documentation, I worked out a solution in Swift. Though I accepted Codo's answer as it helped tremendously, I'm also answering my own question in the hopes that a complete solution would help someone in the future:
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
CVPixelBufferLockBaseAddress(pixelBuffer, 0)
let lumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
let chromaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let lumaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
let chromaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1)
let lumaBuffer = UnsafeMutablePointer<UInt8>(lumaBaseAddress)
let chromaBuffer = UnsafeMutablePointer<UInt8>(chromaBaseAddress)
var rgbaImage = [UInt8](count: 4*width*height, repeatedValue: 0)
for var x = 0; x < width; x++ {
    for var y = 0; y < height; y++ {
        let lumaIndex = x + y * lumaBytesPerRow
        let chromaIndex = (y / 2) * chromaBytesPerRow + (x / 2) * 2
        let yp = lumaBuffer[lumaIndex]
        let cb = chromaBuffer[chromaIndex]
        let cr = chromaBuffer[chromaIndex + 1]

        let ri = Double(yp) + 1.402 * (Double(cr) - 128)
        let gi = Double(yp) - 0.34414 * (Double(cb) - 128) - 0.71414 * (Double(cr) - 128)
        let bi = Double(yp) + 1.772 * (Double(cb) - 128)

        let r = UInt8(min(max(ri, 0), 255))
        let g = UInt8(min(max(gi, 0), 255))
        let b = UInt8(min(max(bi, 0), 255))

        rgbaImage[(x + y * width) * 4] = b
        rgbaImage[(x + y * width) * 4 + 1] = g
        rgbaImage[(x + y * width) * 4 + 2] = r
        rgbaImage[(x + y * width) * 4 + 3] = 255
    }
}
let colorSpace = CGColorSpaceCreateDeviceRGB()
let dataProvider: CGDataProviderRef = CGDataProviderCreateWithData(nil, rgbaImage, 4 * width * height, nil)!
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue)
let cgImage: CGImageRef = CGImageCreate(width, height, 8, 32, width * 4, colorSpace!, bitmapInfo, dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)!
let image: UIImage = UIImage(CGImage: cgImage)
CVPixelBufferUnlockBaseAddress(pixelBuffer,0)
Despite iterating through the entire 8.3MP image, the code executes very quickly. I freely admit that I don't have a deep understanding of Core Media frameworks, but I believe this means the code is executing on the GPU. But, I would appreciate any comments on the code to make it more efficient, or to improve the "Swiftness" as I am completely an amateur.
This is not a complete answer, just some hints:
Scandit uses the YCbCrBiPlanar format. It has a Y byte for each pixel and a Cb and a Cr byte for each group of 2x2 pixels. The Y values are on the first plane, the Cb and Cr values on the second plane.
If the image is w x h pixels large, then the first plane contains h rows of w bytes (and maybe some padding for each line).
The second plane contains h / 2 lines of w / 2 pairs of bytes. Each pair consists of a Cb and a Cr value. Again, each line might have some padding at the end.
So the value of Y for the pixel at position (x, y) can be found at the address:
Y: baseAddressPlane1 + y * bytesPerRowPlane1 + x
And the values Cb and Cr for the pixel at position (x, y) can be found at the addresses:
Cb: baseAddressPlane2 + (y / 2) * bytesPerRowPlane2 + (x / 2) * 2
Cr: baseAddressPlane2 + (y / 2) * bytesPerRowPlane2 + (x / 2) * 2 + 1
The divisions by 2 are integer divisions that discard the fractional part.
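For illustration, the same arithmetic written as a small Swift helper (the function name is my own; the integer divisions truncate as described above):
// Byte offsets of the Y, Cb and Cr samples for the pixel at (x, y)
// in a bi-planar YCbCr buffer.
func sampleOffsets(x: Int, y: Int,
                   bytesPerRowPlane1: Int,
                   bytesPerRowPlane2: Int) -> (luma: Int, cb: Int, cr: Int) {
    let yOffset = y * bytesPerRowPlane1 + x
    let cbOffset = (y / 2) * bytesPerRowPlane2 + (x / 2) * 2
    return (yOffset, cbOffset, cbOffset + 1)
}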
I've seen a lot of questions about converting hex to int, but these are all of the unsigned -> unsigned variety. How could I convert signed hex to an Int?
eg.
somefunc('0xfffff830')
= -2000
Your question implies that you are dealing with 32-bit signed integers
(otherwise 0xfffff830 could not be considered as negative),
so this would work:
let num = "0xfffff830"
let x = Int32(truncatingBitPattern: strtoul(num, nil, 16))
println(x) // -2000
strtoul() converts the hex string to an unsigned integer UInt, and
Int32(truncatingBitPattern:) creates a (signed) 32-bit integer
from the lowest 32 bits of the given argument.
Updated for Swift 4:
let num = "0xfffff830"
let x = Int32(bitPattern: UInt32(num.dropFirst(2), radix: 16) ?? 0)
print(x) // -2000
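As a usage illustration (the helper name is mine, not from the original answers), the Swift 4 form can be wrapped so that invalid input yields nil instead of 0:
func int32FromHex(_ hex: String) -> Int32? {
    // Accept the string with or without a leading "0x"
    let digits = hex.hasPrefix("0x") ? String(hex.dropFirst(2)) : hex
    guard let bits = UInt32(digits, radix: 16) else { return nil }
    // Reinterpret the 32-bit pattern as a signed value
    return Int32(bitPattern: bits)
}

if let value = int32FromHex("0xfffff830") {
    print(value) // -2000
}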
You could use conversion to unsigned and then convert the unsigned to signed.
let num = "0xfffff830"
var result: UInt32 = 0
let converter = NSScanner(string: num)
converter.scanHexInt(&result)
print(unsafeBitCast(result, Int32.self)) // prints -2000
An approach
var hex = UInt32(0xfffff830)
let signedHex: Int
if hex > UInt32.max / 2 {
    signedHex = -Int(~hex + 1) // ~ is the bitwise NOT operator
} else {
    signedHex = Int(hex)
}
The following produces the below error:
int calc_ranks(ranks)
{
  double multiplier = .5;
  return multiplier * ranks;
}
The return type double is not a int, as defined by the method calc_ranks. How do I round/cast to an int?
Round it using the round() method:
int calc_ranks(ranks) {
  double multiplier = .5;
  return (multiplier * ranks).round();
}
You can use any of the following.
double d = 20.5;
int i = d.toInt(); // i = 20
int i = d.round(); // i = 21
int i = d.ceil(); // i = 21
int i = d.floor(); // i = 20
You can simply use toInt() to convert a num to an int.
int calc_ranks(ranks)
{
  double multiplier = .5;
  return (multiplier * ranks).toInt();
}
Note that to do exactly the same thing you can use the truncating division operator:
int calc_ranks(ranks) => ranks ~/ 2;
I see a lot of answers, but without much description. Hope my answer will add some value.
Let's initialize the variable and see how it changes with the different methods.
double x = 8.5;
toInt()
It truncates the decimal value.
int a = x.toInt();
print(a); // 8
truncate()
It also truncates the decimal value.
int b = x.truncate();
print(b); // 8
round()
It returns the closest integer. It uses half up rounding mode.
int c = x.round();
print(c); // 9
ceil()
It returns the smallest integer that is not less than the value.
int c = x.ceil();
print(c); // 9
floor()
It returns the largest integer that is not greater than the value.
int c = x.floor();
print(c); // 8
I looked at the answers above and added some more examples to make it a little easier to understand.
double value = 10.5;
Using toInt()
void main() {
  double value = 10.5;
  var y = value.toInt();
  print(y);
  print(y.runtimeType);
}
Using round()
The round() method returns the closest integer to the double.
void main() {
  double value = 9.6;
  var b = value.round();
  print(b);
  print(b.runtimeType);
}
Using ceil()
The ceil() method returns the smallest integer that is equal to or greater than the given double.
void main() {
  double value = 9.5;
  var d = value.ceil();
  print(d);
  print(d.runtimeType);
}
Using floor()
The floor() method returns the greatest integer not greater than the given double.
void main() {
  double value = 10.9;
  var j = value.floor();
  print(j);
  print(j.runtimeType);
}
Conclusion
We’ve gone through 4 different techniques to convert a double to an integer in Dart and Flutter. You can choose the method that best fits your use case. Flutter is awesome and provides a lot of amazing features.
To convert a double to an int, you can use:
Division
double01 ~/ double02
PS: The operator x ~/ y is more efficient than (x / y).toInt().
Multiplication, addition and subtraction
(double01 * double02).toInt()
(double01 + double02).toInt()
(double01 - double02).toInt()
It's easy:
(20.8).round()
For a String:
double.tryParse('20.8')!.round()
From String to int:
If your string is in int format, like '10', then use:
int.parse(value)
But if the string is in double format, like '10.6', then use:
double.parse(value).toInt()
To convert a double to an int:
doubleValue.toInt()
Try this!
int calc_ranks(ranks)
{
  double multiplier = .5;
  return (multiplier * ranks).truncate();
}
class CurrencyUtils {
  static int doubletoint(double doublee) {
    double multiplier = .5;
    return (multiplier * doublee).round();
  }
}
----------------------
CustomText(
  CurrencyUtils.doubletoint(
    double.parse(projPageListss[0].budget.toString())
  ).toString(),
  fontSize: 20,
  color: Colors.white,
  font: Font.QuicksandSemiBold,
),
There's another alternative: you can first cast the double to the num type and then convert it to an int using toInt().
double multiplier = .5;
return ((multiplier * ranks) as num).toInt();
The num type is the parent type of both int and double.
You can cast both int and double to num, then convert it to whichever type you need
(for a double, use toDouble(); for an int, use toInt()).