Metal multisample not resolving

I've written some simple multisampled rendering in Metal. It's just drawing a single solid colored quad. After rendering I read the contents of the resolve texture. This works on Intel and M1 but fails on AMD and NVidia.
Any idea what I'm doing wrong? Metal's API Validation doesn't complain about anything :(
//
// Renderer.swift
// metaltest
//
import Foundation
import Metal
import MetalKit
class Renderer : NSObject, MTKViewDelegate {
let device: MTLDevice
let commandQueue: MTLCommandQueue
let pipelineState: MTLRenderPipelineState
let vertexBuffer: MTLBuffer
let texture: MTLTexture
let resolveTexture: MTLTexture
let width = 16;
let height = 16;
//let samplerState: MTLSamplerState
var frameCount: Int = 0
// This is the initializer for the Renderer class.
// We will need access to the mtkView later, so we add it as a parameter here.
init?(mtkView: MTKView) {
device = mtkView.device!
mtkView.framebufferOnly = true
commandQueue = device.makeCommandQueue()!
// Create the Render Pipeline
do {
pipelineState = try Renderer.buildRenderPipelineWith(device: device, metalKitView: mtkView)
} catch {
print("Unable to compile render pipeline state: \(error)")
return nil
}
// Create our vertex data
let vertices = [
Vertex(pos: [-1, -1]),
Vertex(pos: [ 1, -1]),
Vertex(pos: [-1, 1]),
Vertex(pos: [-1, 1]),
Vertex(pos: [ 1, -1]),
Vertex(pos: [ 1, 1]),
]
// And copy it to a Metal buffer...
vertexBuffer = device.makeBuffer(bytes: vertices, length: vertices.count * MemoryLayout<Vertex>.stride, options: [])!
print("texture size: width: \(width), height: \(height)")
let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
pixelFormat: MTLPixelFormat.rgba8Unorm,
width: width,
height: height,
mipmapped: false)
textureDescriptor.sampleCount = 4
textureDescriptor.usage = [.renderTarget]
textureDescriptor.textureType = .type2DMultisample
textureDescriptor.storageMode = .private
texture = device.makeTexture(descriptor: textureDescriptor)!
let resolveTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
pixelFormat: MTLPixelFormat.rgba8Unorm,
width: width,
height: height,
mipmapped: false)
resolveTextureDescriptor.usage = [.renderTarget]
resolveTexture = device.makeTexture(descriptor: resolveTextureDescriptor)!
}
// Create our custom rendering pipeline, which loads shaders using `device`, and outputs to the format of `metalKitView`
class func buildRenderPipelineWith(device: MTLDevice, metalKitView: MTKView) throws -> MTLRenderPipelineState {
// Create a new pipeline descriptor
let pipelineDescriptor = MTLRenderPipelineDescriptor()
// Setup the shaders in the pipeline
let library = device.makeDefaultLibrary()
pipelineDescriptor.vertexFunction = library?.makeFunction(name: "vertexShader")
pipelineDescriptor.fragmentFunction = library?.makeFunction(name: "fragmentShader")
// Setup the output pixel format to match the pixel format of the metal kit view
pipelineDescriptor.colorAttachments[0].pixelFormat = MTLPixelFormat.rgba8Unorm;
pipelineDescriptor.sampleCount = 4;
// Compile the configured pipeline descriptor to a pipeline state object
return try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
}
// mtkView will automatically call this function
// whenever it wants new content to be rendered.
func draw(in view: MTKView) {
guard let commandBuffer = commandQueue.makeCommandBuffer() else { return }
let renderPassDescriptor = MTLRenderPassDescriptor(); // view.currentRenderPassDescriptor else { return }
renderPassDescriptor.colorAttachments[0].texture = texture;
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(1, 1, 1, 1)
renderPassDescriptor.colorAttachments[0].resolveTexture = resolveTexture;
renderPassDescriptor.colorAttachments[0].storeAction = .storeAndMultisampleResolve
guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else { return }
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
renderEncoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 6)
renderEncoder.endEncoding()
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
let pixelCount = width * height
let region = MTLRegionMake2D(0, 0, width, height)
var pixels = Array<UInt8>(repeating: UInt8(0), count: pixelCount * 4)
resolveTexture.getBytes(
&pixels,
bytesPerRow: width * 4,
from: region,
mipmapLevel: 0);
print("dest size: width: \(width), height: \(height)")
print("Top Left : \(String(format:"%02X", pixels[0])), \(String(format:"%02X", pixels[1])), \(String(format:"%02X", pixels[2])), \(String(format:"%02X", pixels[3])), expected: (0x80, 0x99, 0xB2, 0xCC)")
let offset = width * height * 4 - 4;
print("Bottom Right: \(String(format:"%02X", pixels[offset])), \(String(format:"%02X", pixels[offset + 1])), \(String(format:"%02X", pixels[offset + 2])), \(String(format:"%02X", pixels[offset + 3])), expected: (0x80, 0x99, 0xB2, 0xCC)")
exit(0)
}
// mtkView will automatically call this function
// whenever the size of the view changes (such as resizing the window).
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
}
}
Shader
#include <metal_stdlib>
#include "ShaderDefinitions.h"
using namespace metal;
struct VertexOut {
float4 pos [[position]];
};
vertex VertexOut vertexShader(const device Vertex *vertexArray [[buffer(0)]], unsigned int vid [[vertex_id]])
{
Vertex in = vertexArray[vid];
VertexOut out;
out.pos = float4(in.pos.xy, 0, 1);
return out;
}
fragment float4 fragmentShader()
{
return float4(0.5, 0.6, 0.7, 0.8);
}
ShaderDefinitions.h
#ifndef ShaderDefinitions_h
#define ShaderDefinitions_h
#include <simd/simd.h>
struct Vertex {
vector_float2 pos;
};
#endif /* ShaderDefinitions_h */
The output I expect is:
Top Left : 80, 99, B2, CC, expected: (0x80, 0x99, 0xB2, 0xCC)
Bottom Right: 80, 99, B2, CC, expected: (0x80, 0x99, 0xB2, 0xCC)
Which is what I get on Intel and M1 but on AMD and NVidia I get
Top Left : 00, 00, 00, 00, expected: (0x80, 0x99, 0xB2, 0xCC)
Bottom Right: 00, 00, 00, 00, expected: (0x80, 0x99, 0xB2, 0xCC)

[Intel, Apple M1] - unified memory model
[Nvidia, AMD] - discrete memory model
Understand the Private Mode
A resource with a MTLStorageModePrivate mode is accessible only to the
GPU. In a unified memory model, this resource resides in system
memory. In a discrete memory model, it resides in video memory.
Use this implementation to copy texture data from a Private Texture to a Shared Buffer.
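That copy could look roughly like this (a minimal sketch, not from the original answer: it assumes the commandBuffer, resolveTexture, width and height from the question, and readbackBuffer is a name introduced here):
// Sketch: copy a private/managed texture into a CPU-visible shared buffer.
let bytesPerRow = width * 4
let readbackBuffer = device.makeBuffer(length: bytesPerRow * height, options: .storageModeShared)!
let blit = commandBuffer.makeBlitCommandEncoder()!
blit.copy(from: resolveTexture,
    sourceSlice: 0,
    sourceLevel: 0,
    sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0),
    sourceSize: MTLSize(width: width, height: height, depth: 1),
    to: readbackBuffer,
    destinationOffset: 0,
    destinationBytesPerRow: bytesPerRow,
    destinationBytesPerImage: bytesPerRow * height)
blit.endEncoding()
// After commit and waitUntilCompleted, readbackBuffer.contents() holds the pixels.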

The issue was that I needed to call synchronize in order to make the data available to the CPU:
guard let blitEncoder = commandBuffer.makeBlitCommandEncoder() else { return }
blitEncoder.synchronize(texture: resolveTexture, slice: 0, level: 0);
blitEncoder.endEncoding();
Inserting that code before commandBuffer.commit() in the code from the question solved the issue.
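With that change, the tail of draw(in:) looks roughly like this (a sketch based on the code above):
renderEncoder.endEncoding()
// Synchronize the managed resolve texture so its contents become visible to the CPU.
guard let blitEncoder = commandBuffer.makeBlitCommandEncoder() else { return }
blitEncoder.synchronize(texture: resolveTexture, slice: 0, level: 0)
blitEncoder.endEncoding()
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
// resolveTexture.getBytes(...) now returns the resolved pixels on discrete GPUs too.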

Related

Converting an MLMultiArray to an image or an OpenGL / metal texture

I'm trying to do background segmentation of a live video using CoreML. I used DeepLabV3 as provided by Apple. The model works OK, even though it already takes 100 ms to process a 513x513 image. I then want to display the output, which is a 513x513 array of Int32. Converting it into an image as done in CoreMLHelpers takes 300 ms, and I'm looking for a much faster way to display the results. I was thinking that maybe it'd be faster to somehow dump this into an OpenGL or Metal texture.
What is the best way to handle MLMultiArray for live inputs?
My answer is based on processing the MLMultiArray in Metal
Create an MTLBuffer:
let device = MTLCreateSystemDefaultDevice()!
let segmentationMaskBuffer: MTLBuffer = device.makeBuffer(length: segmentationHeight * segmentationWidth * MemoryLayout<Int32>.stride, options: [])!
Copy MLMultiArray to MTLBuffer:
memcpy(segmentationMaskBuffer.contents(), mlOutput.semanticPredictions.dataPointer, segmentationMaskBuffer.length)
Setup Metal related variables:
let commandQueue = device.makeCommandQueue()!
let library = device.makeDefaultLibrary()!
let function = library.makeFunction(name: "binaryMask")!
let computePipeline = try! device.makeComputePipelineState(function: function)
create a struct for segmentation size:
let segmentationWidth = 513
let segmentationHeight = 513
struct MixParams {
var width: Int32 = Int32(segmentationWidth)
var height: Int32 = Int32(segmentationHeight)
}
create an output texture:
let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: width, height: height, mipmapped: false)
textureDescriptor.usage = [.shaderRead, .shaderWrite]
let outputTexture = device.makeTexture(descriptor: textureDescriptor)!
pass the MTLBuffer and output texture to the kernel function:
let buffer = commandQueue.makeCommandBuffer()!
let maskCommandEncoder = buffer.makeComputeCommandEncoder()!
maskCommandEncoder.setTexture(outputTexture, index: 1)
maskCommandEncoder.setBuffer(segmentationMaskBuffer, offset: 0, index: 0)
var params = MixParams()
maskCommandEncoder.setBytes(&params, length: MemoryLayout<MixParams>.size, index: 1)
let w = computePipeline.threadExecutionWidth
let h = computePipeline.maxTotalThreadsPerThreadgroup / w
let threadGroupSize = MTLSizeMake(w, h, 1)
let threadGroups = MTLSizeMake(
(outputTexture.width + threadGroupSize.width - 1) / threadGroupSize.width,
(outputTexture.height + threadGroupSize.height - 1) / threadGroupSize.height, 1)
maskCommandEncoder.setComputePipelineState(computePipeline)
maskCommandEncoder.dispatchThreadgroups(threadGroups, threadsPerThreadgroup: threadGroupSize)
maskCommandEncoder.endEncoding()
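Presumably the command buffer also has to be committed and allowed to finish before the output texture is read back; a minimal addition:
buffer.commit()
// Wait here (or use a completion handler) so the kernel has finished writing outputTexture.
buffer.waitUntilCompleted()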
write your kernel function in Shaders.metal file:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>
struct MixParams {
int segmentationWidth;
int segmentationHeight;
};
static inline int get_class(float2 pos, int width, int height, device int* mask) {
const int x = int(pos.x * width);
const int y = int(pos.y * height);
return mask[y*width + x];
}
static float get_person_probability(float2 pos, int width, int height, device int* mask) {
return get_class(pos, width, height, mask) == 15;
}
kernel void binaryMask(
texture2d<float, access::write> outputTexture [[texture(1)]],
device int* segmentationMask [[buffer(0)]],
constant MixParams& params [[buffer(1)]],
uint2 gid [[thread_position_in_grid]])
{
float width = outputTexture.get_width();
float height = outputTexture.get_height();
if (gid.x >= width ||
gid.y >= height) return;
const float2 pos = float2(float(gid.x) / width,
float(gid.y) / height);
const float is_person = get_person_probability(pos, params.segmentationWidth,
params.segmentationHeight,
segmentationMask);
float4 outPixel;
if (is_person < 0.5f) {
outPixel = float4(0.0,0.0,0.0,0.0);
} else {
outPixel = float4(1.0,1.0,1.0,1.0);
}
outputTexture.write(outPixel, gid);
}
Finally, get the CIImage from the output texture after encoding is finished:
let kciOptions: [CIImageOption: Any] = [CIImageOption.colorSpace: CGColorSpaceCreateDeviceRGB()]
let maskImage = CIImage(mtlTexture: outputTexture, options: kciOptions)!.oriented(.downMirrored)
Instead of outputting an MLMultiArray you can change the model to make it output an image of type CVPixelBuffer. Then you can use CVMetalTextureCacheCreateTextureFromImage to turn the pixel buffer into an MTLTexture. (I think this works but I don't recall if I ever tried it. Not all pixel buffer objects can be turned into textures and I'm not sure if Core ML outputs a CVPixelBuffer object with the "Metal compatibility flag" turned on.)
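A rough sketch of that route, assuming the model has been changed to output a BGRA, Metal-compatible CVPixelBuffer (pixelBuffer, makeTexture and textureCache are illustrative names, not part of the original answer):
import CoreVideo
import Metal

let device = MTLCreateSystemDefaultDevice()!
var textureCache: CVMetalTextureCache?
CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, device, nil, &textureCache)

func makeTexture(from pixelBuffer: CVPixelBuffer) -> MTLTexture? {
    // Wrap the pixel buffer in an MTLTexture without copying its contents.
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    var cvTexture: CVMetalTexture?
    CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
        textureCache!,
        pixelBuffer,
        nil,
        .bgra8Unorm,
        width,
        height,
        0,
        &cvTexture)
    return cvTexture.flatMap { CVMetalTextureGetTexture($0) }
}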
Alternatively, you can write a compute kernel that takes in the MLMultiArray and converts it to a texture, which then gets drawn into a Metal view. This has the advantage that you apply all kinds of effects to the segmentation map in the compute kernel at the same time.

Swift 3 CGContext Memory Leak

I'm using a CGBitmapContext() to convert colour spaces to ARGB and get the pixel data values. I malloc space for the bitmap context and free it after I'm done, but I'm still seeing a memory leak in Instruments. I think I'm likely doing something wrong, so any help would be appreciated.
Here is the ARGBBitmapContext function
func createARGBBitmapContext(width: Int, height: Int) -> CGContext {
var bitmapByteCount = 0
var bitmapBytesPerRow = 0
//Get image width, height
let pixelsWide = width
let pixelsHigh = height
bitmapBytesPerRow = Int(pixelsWide) * 4
bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)
let colorSpace = CGColorSpaceCreateDeviceRGB()
// Here is the malloc call that Instruments complains of
let bitmapData = malloc(bitmapByteCount)
let context = CGContext(data: bitmapData, width: pixelsWide, height: pixelsHigh, bitsPerComponent: 8, bytesPerRow: bitmapBytesPerRow, space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
// Do I need to free something here first?
return context!
}
Here is where I use the context to retrieve all the pixel values as a list of UInt8s (and where the memory leaks)
extension UIImage {
func ARGBPixelValues() -> [UInt8] {
let width = Int(self.size.width)
let height = Int(self.size.height)
var pixels = [UInt8](repeatElement(0, count: width * height * 3))
let rect = CGRect(x: 0, y: 0, width: width, height: height)
let context = createARGBBitmapContext(width: width, height: height)
context.clear(rect)
context.draw(self.cgImage!, in: rect)
var location = 0
if let data = context.data {
while location < (width * height) {
let arrOffset = 3 * location
let offset = 4 * (location)
let R = data.load(fromByteOffset: offset + 1, as: UInt8.self)
let G = data.load(fromByteOffset: offset + 2, as: UInt8.self)
let B = data.load(fromByteOffset: offset + 3, as: UInt8.self)
pixels[arrOffset] = R
pixels[arrOffset+1] = G
pixels[arrOffset+2] = B
location += 1
}
free(context.data) // Free the data consumed, perhaps this isn't right?
}
return pixels
}
}
Instruments reports a leaked malloc of 1.48 MiB, which is right for my image size (540 x 720). I free the data, but apparently that is not right.
I should mention that I know you can pass nil to the CGContext initializer (and it will manage the memory), but I'm more curious why using malloc creates an issue. Is there something more I should know? (I'm more familiar with Obj-C.)
Because CoreGraphics is not handled by ARC (like all other C libraries), you need to wrap your code in an autoreleasepool, even in Swift. Particularly if you are not on the main thread (which you should not be, if CoreGraphics is involved; .userInitiated or lower is appropriate).
func myFunc() {
for _ in 0 ..< makeMoneyFast {
autoreleasepool {
// Create CGImageRef etc...
// Do Stuff... whir... whiz... PROFIT!
}
}
}
For those that care, your Objective-C should also be wrapped like:
BOOL result = NO;
NSMutableData* data = [[NSMutableData alloc] init];
@autoreleasepool {
CGImageRef image = [self CGImageWithResolution:dpi
hasAlpha:hasAlpha
relativeScale:scale];
NSAssert(image != nil, @"could not create image for TIFF export");
if (image == nil)
return nil;
CGImageDestinationRef destRef = CGImageDestinationCreateWithData((CFMutableDataRef)data, kUTTypeTIFF, 1, NULL);
CGImageDestinationAddImage(destRef, image, (CFDictionaryRef)options);
result = CGImageDestinationFinalize(destRef);
CFRelease(destRef);
}
if (result) {
return [data copy];
} else {
return nil;
}
See this answer for details.
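As the question notes, you can also let Core Graphics own the backing store by passing nil for the data parameter, which sidesteps the manual malloc/free entirely; a minimal sketch of that variant:
func createARGBBitmapContext(width: Int, height: Int) -> CGContext? {
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    // Passing nil lets CGContext allocate and release the pixel buffer itself.
    return CGContext(data: nil,
                     width: width,
                     height: height,
                     bitsPerComponent: 8,
                     bytesPerRow: width * 4,
                     space: colorSpace,
                     bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
}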

GLKit texture jagged

I'm trying to implement GLKit texture painting, and it looks too jagged.
I'm creating the texture from a .png; I got this code from Apple's GLPaint example:
private func texture(fromName name: String) -> textureInfo_t {
var texId: GLuint = 0
var texture: textureInfo_t = (0, 0, 0)
let brushImage = UIImage(named: name)!.cgImage!
let width: size_t = brushImage.width
let height: size_t = brushImage.height
var brushData = [GLubyte](repeating: 0, count: width * height * 4)
let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue
let brushContext = CGContext(data: &brushData, width: width, height: height, bitsPerComponent: 8, bytesPerRow: width * 4, space: (brushImage.colorSpace!), bitmapInfo: bitmapInfo)
brushContext?.draw(brushImage, in: CGRect(x: 0.0, y: 0.0, width: width.g, height: height.g))
glGenTextures(1, &texId)
// Bind the texture name.
glBindTexture(GL_TEXTURE_2D.ui, texId)
// Set the texture parameters to use a minifying filter and a linear filter (weighted average)
glTexParameteri(GL_TEXTURE_2D.ui, GL_TEXTURE_MIN_FILTER.ui, GL_LINEAR)
// Specify a 2D texture image, providing a pointer to the image data in memory
glTexImage2D(GL_TEXTURE_2D.ui, 0, GL_RGBA, width.i, height.i, 0, GL_RGBA.ui, GL_UNSIGNED_BYTE.ui, brushData)
// Release the image data; it's no longer needed
texture.id = texId
texture.width = width.i
texture.height = height.i
return texture
}
and rendering while painting
private func renderLine(from _start: CGPoint, to _end: CGPoint) {
struct Static {
static var vertexBuffer: [GLfloat] = []
}
var count = 0
EAGLContext.setCurrent(context)
glBindFramebuffer(GL_FRAMEBUFFER.ui, viewFramebuffer)
// Convert locations from Points to Pixels
let scale = self.contentScaleFactor
var start = _start
start.x *= scale
start.y *= scale
var end = _end
end.x *= scale
end.y *= scale
// Allocate vertex array buffer
// Add points to the buffer so there are drawing points every X pixels
count = max(Int(ceilf(sqrtf((end.x - start.x).f * (end.x - start.x).f + (end.y - start.y).f * (end.y - start.y).f) / kBrushPixelStep.f)), 1)
Static.vertexBuffer.reserveCapacity(count * 2)
Static.vertexBuffer.removeAll(keepingCapacity: true)
for i in 0..<count {
Static.vertexBuffer.append(start.x.f + (end.x - start.x).f * (i.f / count.f))
Static.vertexBuffer.append(start.y.f + (end.y - start.y).f * (i.f / count.f))
}
// Load data to the Vertex Buffer Object
glBindBuffer(GL_ARRAY_BUFFER.ui, vboId)
glBufferData(GL_ARRAY_BUFFER.ui, count*2*MemoryLayout<GLfloat>.size, Static.vertexBuffer, GL_DYNAMIC_DRAW.ui)
glEnableVertexAttribArray(ATTRIB_VERTEX.ui)
glVertexAttribPointer(ATTRIB_VERTEX.ui, 2, GL_FLOAT.ui, GL_FALSE.ub, 0, nil)
// Draw
glUseProgram(program[PROGRAM_POINT].id)
glDrawArrays(GL_POINTS.ui, 0, count.i)
// Display the buffer
glBindRenderbuffer(GL_RENDERBUFFER.ui, viewRenderbuffer)
context.presentRenderbuffer(GL_RENDERBUFFER.l)
}
How could I improve texture quality?
UPDATE:
Even with a bigger resolution .png the result is the same. What am I doing wrong?
Here is the 1024x1024 .png with a transparent background that I'm using:

Indexed drawing with metal

I am trying to load a model (from .OBJ) and draw it to the screen on iOS with MetalKit. The problem is that instead of my model, I get some random polygons...
Here is the code that is meant to load the model (it is based on a tutorial from raywenderlich.com):
let allocator = MTKMeshBufferAllocator(device: device)
let vertexDescriptor = MDLVertexDescriptor()
let vertexLayout = MDLVertexBufferLayout()
vertexLayout.stride = sizeof(Vertex)
vertexDescriptor.layouts = [vertexLayout]
vertexDescriptor.attributes = [MDLVertexAttribute(name: MDLVertexAttributePosition, format: MDLVertexFormat.Float3, offset: 0, bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeColor, format: MDLVertexFormat.Float4, offset: sizeof(float3), bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: MDLVertexFormat.Float2, offset: sizeof(float3)+sizeof(float4), bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeNormal, format: MDLVertexFormat.Float3, offset: sizeof(float3)+sizeof(float4)+sizeof(float2), bufferIndex: 0)]
var error: NSError?
let asset = MDLAsset(URL: path, vertexDescriptor: vertexDescriptor, bufferAllocator: allocator, preserveTopology: true, error: &error)
if error != nil{
print(error)
return nil
}
let model = asset.objectAtIndex(0) as! MDLMesh
let mesh = try MTKMesh(mesh: model, device: device)
And here is my drawing method:
func render(commandQueue: MTLCommandQueue, pipelineState: MTLRenderPipelineState,drawable: CAMetalDrawable,projectionMatrix: float4x4,modelViewMatrix: float4x4, clearColor: MTLClearColor){
dispatch_semaphore_wait(bufferProvider.availibleResourcesSemaphore, DISPATCH_TIME_FOREVER)
let renderPassDescriptor = MTLRenderPassDescriptor()
renderPassDescriptor.colorAttachments[0].texture = drawable.texture
renderPassDescriptor.colorAttachments[0].loadAction = .Clear
renderPassDescriptor.colorAttachments[0].clearColor = clearColor
renderPassDescriptor.colorAttachments[0].storeAction = .Store
let commandBuffer = commandQueue.commandBuffer()
commandBuffer.addCompletedHandler { (buffer) in
dispatch_semaphore_signal(self.bufferProvider.availibleResourcesSemaphore)
}
let renderEncoder = commandBuffer.renderCommandEncoderWithDescriptor(renderPassDescriptor)
renderEncoder.setCullMode(MTLCullMode.None)
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, atIndex: 0)
renderEncoder.setFragmentTexture(texture, atIndex: 0)
if let samplerState = samplerState{
renderEncoder.setFragmentSamplerState(samplerState, atIndex: 0)
}
var nodeModelMatrix = self.modelMatrix()
nodeModelMatrix.multiplyLeft(modelViewMatrix)
uniformBuffer = bufferProvider.nextUniformsBuffer(projectionMatrix, modelViewMatrix: nodeModelMatrix, light: light)
renderEncoder.setVertexBuffer(self.uniformBuffer, offset: 0, atIndex: 1)
renderEncoder.setFragmentBuffer(uniformBuffer, offset: 0, atIndex: 1)
if indexBuffer != nil{
renderEncoder.drawIndexedPrimitives(.Triangle, indexCount: self.indexCount, indexType: self.indexType, indexBuffer: self.indexBuffer!, indexBufferOffset: 0)
}else{
renderEncoder.drawPrimitives(.Triangle, vertexStart: 0, vertexCount: vertexCount, instanceCount: vertexCount/3)
}
renderEncoder.endEncoding()
commandBuffer.presentDrawable(drawable)
commandBuffer.commit()
}
Here is my vertex shader:
struct VertexIn{
packed_float3 position;
packed_float4 color;
packed_float2 texCoord;
packed_float3 normal;
};
struct VertexOut{
float4 position [[position]];
float3 fragmentPosition;
float4 color;
float2 texCoord;
float3 normal;
};
struct Light{
packed_float3 color;
float ambientIntensity;
packed_float3 direction;
float diffuseIntensity;
float shininess;
float specularIntensity;
};
struct Uniforms{
float4x4 modelMatrix;
float4x4 projectionMatrix;
Light light;
};
vertex VertexOut basic_vertex(
const device VertexIn* vertex_array [[ buffer(0) ]],
const device Uniforms& uniforms [[ buffer(1) ]],
unsigned int vid [[ vertex_id ]]) {
float4x4 mv_Matrix = uniforms.modelMatrix;
float4x4 proj_Matrix = uniforms.projectionMatrix;
VertexIn VertexIn = vertex_array[vid];
VertexOut VertexOut;
VertexOut.position = proj_Matrix * mv_Matrix * float4(VertexIn.position,1);
VertexOut.fragmentPosition = (mv_Matrix * float4(VertexIn.position,1)).xyz;
VertexOut.color = VertexIn.color;
VertexOut.texCoord = VertexIn.texCoord;
VertexOut.normal = (mv_Matrix * float4(VertexIn.normal, 0.0)).xyz;
return VertexOut;
}
And here is what it looks like:
link
Actually, I have another class, written completely by me, that loads models. It works fine; the problem is that it does not use indexing, so if I try to load models more complex than a low-poly sphere, the GPU crashes... Anyway, I tried to modify it to use indexing and got the same result. Then I added hardcoded indices for testing and got a really weird result: when I had 3 indices it drew a triangle, when I added 3 more it drew the same triangle, and after 3 more it drew 2 triangles...
Edit:
Here is my Vertex structure:
struct Vertex:Equatable{
var x,y,z: Float
var r,g,b,a: Float
var s,t: Float
var nX,nY,nZ:Float
func floatBuffer()->[Float]{
return [x,y,z,r,g,b,a,s,t,nX,nY,nZ]
}
}
I see a couple of potential issues here.
1) Your vertex descriptor does not map exactly to your Vertex struct. The position variables (x, y, z) occupy 12 bytes, so the color variables start at an offset of 12 bytes. This matches the packed_float3 position field in your shader's VertexIn struct, but in the vertex descriptor you provide to Model I/O, you use sizeof(Float3), which is 16, as the offset of the color attribute. Because you're packing the position field, you should use sizeof(Float) * 3 for this value instead, and likewise in the subsequent offsets. I suspect this is the main cause of your problems.
More generally, it's a good idea to use strideof rather than sizeof to account for alignment, though--by chance--it wouldn't make a difference here.
2) Model I/O is allowed to use a single MTLBuffer to store both vertices and indices, so you should use the offset member of each MTKMeshBuffer when setting the vertex buffer or specifying the index buffer in each draw call, rather than assuming the offsets to be 0.
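Applied to the code in the question, those two fixes might look roughly like this (same Swift 2-era API as above; mesh is the MTKMesh loaded earlier and a single submesh is assumed):
// 1) Packed offsets: 3 + 4 + 2 + 3 floats per vertex (offsets 0, 12, 28, 36; stride 48).
vertexDescriptor.attributes = [
    MDLVertexAttribute(name: MDLVertexAttributePosition, format: MDLVertexFormat.Float3, offset: 0, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeColor, format: MDLVertexFormat.Float4, offset: sizeof(Float) * 3, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: MDLVertexFormat.Float2, offset: sizeof(Float) * 7, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeNormal, format: MDLVertexFormat.Float3, offset: sizeof(Float) * 9, bufferIndex: 0)]
vertexLayout.stride = sizeof(Float) * 12
// 2) Respect the MTKMeshBuffer offsets instead of assuming 0.
let vertexMeshBuffer = mesh.vertexBuffers[0]
renderEncoder.setVertexBuffer(vertexMeshBuffer.buffer, offset: vertexMeshBuffer.offset, atIndex: 0)
let submesh = mesh.submeshes[0]
renderEncoder.drawIndexedPrimitives(.Triangle,
    indexCount: submesh.indexCount,
    indexType: submesh.indexType,
    indexBuffer: submesh.indexBuffer.buffer,
    indexBufferOffset: submesh.indexBuffer.offset)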

Swift 2.2 - Count Black Pixels in UIImage

I need to count all the black pixels in a UIImage. I have found code that could work; however, it is written in Objective-C. I have tried to convert it to Swift, but I get lots of errors and I cannot find a way to fix them.
What's the best way to do this using Swift?
Simple Image
Objective-C:
/**
* Structure to keep one pixel in RRRRRRRRGGGGGGGGBBBBBBBBAAAAAAAA format
*/
struct pixel {
unsigned char r, g, b, a;
};
/**
* Process the image and return the number of pure red pixels in it.
*/
- (NSUInteger) processImage: (UIImage*) image
{
NSUInteger numberOfRedPixels = 0;
// Allocate a buffer big enough to hold all the pixels
struct pixel* pixels = (struct pixel*) calloc(1, image.size.width * image.size.height * sizeof(struct pixel));
if (pixels != nil)
{
// Create a new bitmap
CGContextRef context = CGBitmapContextCreate(
(void*) pixels,
image.size.width,
image.size.height,
8,
image.size.width * 4,
CGImageGetColorSpace(image.CGImage),
kCGImageAlphaPremultipliedLast
);
if (context != NULL)
{
// Draw the image in the bitmap
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, image.size.width, image.size.height), image.CGImage);
// Now that we have the image drawn in our own buffer, we can loop over the pixels to
// process it. This simple case simply counts all pixels that have a pure red component.
// There are probably more efficient and interesting ways to do this. But the important
// part is that the pixels buffer can be read directly.
NSUInteger numberOfPixels = image.size.width * image.size.height;
// Walk a separate cursor so the original allocation can still be freed below.
struct pixel* p = pixels;
while (numberOfPixels > 0) {
if (p->r == 255) {
numberOfRedPixels++;
}
p++;
numberOfPixels--;
}
CGContextRelease(context);
}
free(pixels);
}
return numberOfRedPixels;
}
Much faster is to use Accelerate's vImageHistogramCalculation to get a histogram of the different channels in your image:
let img: CGImage = CIImage(image: image!)!.cgImage!
let imgProvider: CGDataProvider = img.dataProvider!
let imgBitmapData: CFData = imgProvider.data!
var imgBuffer = vImage_Buffer(data: UnsafeMutableRawPointer(mutating: CFDataGetBytePtr(imgBitmapData)), height: vImagePixelCount(img.height), width: vImagePixelCount(img.width), rowBytes: img.bytesPerRow)
let alpha = [UInt](repeating: 0, count: 256)
let red = [UInt](repeating: 0, count: 256)
let green = [UInt](repeating: 0, count: 256)
let blue = [UInt](repeating: 0, count: 256)
let alphaPtr = UnsafeMutablePointer<vImagePixelCount>(mutating: alpha) as UnsafeMutablePointer<vImagePixelCount>?
let redPtr = UnsafeMutablePointer<vImagePixelCount>(mutating: red) as UnsafeMutablePointer<vImagePixelCount>?
let greenPtr = UnsafeMutablePointer<vImagePixelCount>(mutating: green) as UnsafeMutablePointer<vImagePixelCount>?
let bluePtr = UnsafeMutablePointer<vImagePixelCount>(mutating: blue) as UnsafeMutablePointer<vImagePixelCount>?
let rgba = [redPtr, greenPtr, bluePtr, alphaPtr]
let histogram = UnsafeMutablePointer<UnsafeMutablePointer<vImagePixelCount>?>(mutating: rgba)
let error = vImageHistogramCalculation_ARGB8888(&imgBuffer, histogram, UInt32(kvImageNoFlags))
After this runs, alpha, red, green, and blue are now histograms of the colors in your image. If red, green, and blue each only have count in the 0th spot, while alpha only has count in the last spot, your image is black.
If you want to not even check multiple arrays, you can use vImageMatrixMultiply to combine your different channels:
let readableMatrix: [[Int16]] = [
[3, 0, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]
]
var matrix: [Int16] = [Int16](repeating: 0, count: 16)
for i in 0...3 {
for j in 0...3 {
matrix[(3 - j) * 4 + (3 - i)] = readableMatrix[i][j]
}
}
vImageMatrixMultiply_ARGB8888(&imgBuffer, &imgBuffer, matrix, 3, nil, nil, UInt32(kvImageNoFlags))
If you stick this in before the histogramming, your imgBuffer will be modified in place to average the RGB in each pixel, writing the average out to the B channel. As such, you can just check the blue histogram instead of all three.
(btw, the best description of vImageMatrixMultiply I've found is in the source code, like at https://github.com/phracker/MacOSX-SDKs/blob/2d31dd8bdd670293b59869335d9f1f80ca2075e0/MacOSX10.7.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vImage.framework/Versions/A/Headers/Transform.h#L21)
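For example, after the histogram call above a pure-black check could look like this (a sketch reusing img and the alpha/red/green/blue arrays; it assumes an opaque image so every alpha value lands in bin 255):
let pixelCount = UInt(img.width * img.height)
// All colour samples in bin 0 and all alpha samples in bin 255 means the image is solid black.
let isAllBlack = red[0] == pixelCount &&
    green[0] == pixelCount &&
    blue[0] == pixelCount &&
    alpha[255] == pixelCount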
I ran into a similar issue recently, where I needed to determine if an image was 100% black. The following code will return the number of pure black pixels it finds in an image.
However, if you want to bump the threshold up, you can change the compare value, and allow it to tolerate a wider range of possible colors.
import UIKit
extension UIImage {
var blackPixelCount: Int {
var count = 0
for x in 0..<Int(size.width) {
for y in 0..<Int(size.height) {
count = count + (isPixelBlack(CGPoint(x: CGFloat(x), y: CGFloat(y))) ? 1 : 0)
}
}
return count
}
private func isPixelBlack(_ point: CGPoint) -> Bool {
let pixelData = cgImage?.dataProvider?.data
let pointerData: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo = Int(((size.width * point.y) + point.x)) * 4
let maxValue: CGFloat = 255.0
let compare: CGFloat = 0.01
if (CGFloat(pointerData[pixelInfo]) / maxValue) > compare { return false }
if (CGFloat(pointerData[pixelInfo + 1]) / maxValue) > compare { return false }
if (CGFloat(pointerData[pixelInfo + 2]) / maxValue) > compare { return false }
return true
}
}
You call this with:
let count = image.blackPixelCount
The one caveat is that this is a very slow process, even on small images.
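One reason it is so slow is that isPixelBlack re-fetches the CGImage data for every pixel; a sketch of a faster variant (not from the original answer) that reads the backing data once:
import UIKit

extension UIImage {
    // Same pure-black count as above, but the data pointer is fetched a single time.
    var fastBlackPixelCount: Int {
        guard let cgImage = cgImage,
            let data = cgImage.dataProvider?.data,
            let ptr = CFDataGetBytePtr(data) else { return 0 }
        let bytesPerRow = cgImage.bytesPerRow
        let bytesPerPixel = cgImage.bitsPerPixel / 8
        var count = 0
        for y in 0..<cgImage.height {
            for x in 0..<cgImage.width {
                let i = y * bytesPerRow + x * bytesPerPixel
                // Assumes the first three components are the colour channels (e.g. RGBA/BGRA).
                if ptr[i] == 0 && ptr[i + 1] == 0 && ptr[i + 2] == 0 {
                    count += 1
                }
            }
        }
        return count
    }
}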
