I am trying to get my context (EAGLContext) to render in Swift, but for days I haven't been able to get the glDrawElements function to work.
I have read a couple of similar questions here on Stack Overflow, but to no avail.
My glDrawElements is as follows:
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), &offset)
I am having a problem with the last parameter, offset, which expects an UnsafePointer<Void>.
I have tried the following:
let offset: CConstVoidPointer = COpaquePointer(UnsafePointer<Int>(0))
The above no longer works, because CConstVoidPointer doesn't seem to be available anymore in Swift 1.2.
And:
var offset = UnsafePointer<Int>(other: 0)
The above gives a warning that I should use bitPattern: instead.
Although I don't believe bitPattern: should be used here (because the parameter expects a Word), I decided to give it a try, as suggested:
var offset = UnsafePointer<Int>(bitPattern: 0)
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), &offset)
I would get an EXC_BAD_ACCESS (code=EXC_I386_GPFLT) error.
I also tried, in vain, something as simple as declaring:
var offsetZero : Int = 0
and subsequently feeding it to the last parameter of glDrawElements like so:
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), &offsetZero)
I get the same EXC_BAD_ACCESS as in the case above.
How can I properly form a value suitable for the last parameter of glDrawElements, which expects the type UnsafePointer<Void>?
@RobMayoff
Update (adding code for VBO set-up and variables declarations and definitions):
struct vertex
{
    var position: (CFloat, CFloat, CFloat)
    var color: (CFloat, CFloat, CFloat, CFloat)
}

var vertices =
[
    vertex(position: (-1, -1, 0) , color: (1, 1, 0, 1)),
    vertex(position: (1, -1, 0) , color: (1, 1, 1, 1)),
    vertex(position: (-1, 0, 1) , color: (0, 1, 0, 1)),
    vertex(position: (-1, 1, 1), color: (1, 0, 0, 1))
]

let indices: [UInt8] = [ 0, 2, 3, 3, 1, 0 ]

class mainGLView : UIView
{
    var layer : CAEAGLLayer!
    var context : EAGLContext!
    var cBuffer : GLuint = GLuint()
    var pos : GLuint = GLuint()
    var color : GLuint = GLuint()
    var iBuffer : GLuint = GLuint()
    var vBuffer : GLuint = GLuint()
    var vao : GLuint = GLuint()

    override class func layerClass() -> AnyClass
    {
        return CAEAGLLayer.self
    }

    required init(coder aDecoder: NSCoder)
    {
        super.init(coder: aDecoder)
        //setting up context, buffers, shaders, etc.
        self.configureVBO()
        self.setupRendering()
    }

    func configureVBO()
    {
        glGenVertexArraysOES(1, &vao);
        glBindVertexArrayOES(vao);
        glGenBuffers(GLsizei(1), &vBuffer)
        glBindBuffer(GLenum(GL_ARRAY_BUFFER), vBuffer)
        glBufferData(GLenum(GL_ARRAY_BUFFER), vertices.size(), vertices, GLenum(GL_STATIC_DRAW))
        glEnableVertexAttribArray(pos)
        var ptr = COpaquePointer(UnsafePointer<Int>(bitPattern: 0))
        glVertexAttribPointer(GLuint(pos), GLint(3), GLenum(GL_FLOAT), GLboolean(GL_FALSE), GLsizei(sizeof(vertex)), &ptr)
        glEnableVertexAttribArray(color)
        let fColor = UnsafePointer<Int>(bitPattern: sizeof(Float) * 3)
        glVertexAttribPointer(GLuint(color), 4, GLenum(GL_FLOAT), GLboolean(GL_FALSE), GLsizei(sizeof(vertex)), fColor)
        glGenBuffers(1, &iBuffer)
        glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), iBuffer)
        glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), indices.size(), indices, GLenum(GL_STATIC_DRAW))
        glBindBuffer(GLenum(GL_ARRAY_BUFFER), 0)
        glBindVertexArrayOES(0)
    }

    func setupRendering()
    {
        glBindVertexArrayOES(vao);
        glViewport(0, 0, GLint(self.frame.size.width), GLint(self.frame.size.height));
        indices.withUnsafeBufferPointer
        {
            (pointer : UnsafeBufferPointer<UInt8>) -> Void in
            glDrawElements(GLenum(GL_TRIANGLES), GLsizei(indices.count), GLenum(GL_UNSIGNED_BYTE), UnsafePointer<Void>(pointer.baseAddress))
            Void()
        }
        self.context.presentRenderbuffer(Int(GL_RENDERBUFFER))
        glBindVertexArrayOES(0)
    }
}
Because of your choices for the count and type arguments, the indices argument (the last argument) needs to be a pointer to the first element of an array, of (at least) length Indices.count, of unsigned bytes. This pointer needs to be converted to UnsafePointer<Void> in Swift.
You didn't show the declaration of your Indices. Let's assume it's declared like this:
let indices: [UInt8] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
Then you can call glDrawElements by jumping through these hoops:
indices.withUnsafeBufferPointer { (pointer: UnsafeBufferPointer<UInt8>) -> Void in
    glDrawElements(GLenum(GL_TRIANGLES), GLsizei(indices.count),
        GLenum(GL_UNSIGNED_BYTE),
        UnsafePointer<Void>(pointer.baseAddress))
    Void()
}
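Worth noting for the updated code in the question: configureVBO leaves iBuffer bound as the GL_ELEMENT_ARRAY_BUFFER inside the VAO, and when an element array buffer is bound, OpenGL ES interprets the last argument of glDrawElements as a byte offset into that buffer rather than as a client-side pointer. In that situation, a rough sketch (using the vao, iBuffer and indices from the update) is simply:

glBindVertexArrayOES(vao)   // the VAO's state includes the iBuffer binding
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(indices.count), GLenum(GL_UNSIGNED_BYTE), nil)   // nil = byte offset 0 into iBuffer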
Related
I've written some simple multisampled rendering in Metal. It's just drawing a single solid colored quad. After rendering I read the contents of the resolve texture. This works on Intel and M1 but fails on AMD and NVidia.
Any idea what I'm doing wrong? Metal's API Validation doesn't complain about anything :(
//
// Renderer.swift
// metaltest
//
import Foundation
import Metal
import MetalKit

class Renderer : NSObject, MTKViewDelegate {
    let device: MTLDevice
    let commandQueue: MTLCommandQueue
    let pipelineState: MTLRenderPipelineState
    let vertexBuffer: MTLBuffer
    let texture: MTLTexture
    let resolveTexture: MTLTexture
    let width = 16;
    let height = 16;
    //let samplerState: MTLSamplerState
    var frameCount: Int = 0

    // This is the initializer for the Renderer class.
    // We will need access to the mtkView later, so we add it as a parameter here.
    init?(mtkView: MTKView) {
        device = mtkView.device!
        mtkView.framebufferOnly = true
        commandQueue = device.makeCommandQueue()!
        // Create the Render Pipeline
        do {
            pipelineState = try Renderer.buildRenderPipelineWith(device: device, metalKitView: mtkView)
        } catch {
            print("Unable to compile render pipeline state: \(error)")
            return nil
        }
        // Create our vertex data
        let vertices = [
            Vertex(pos: [-1, -1]),
            Vertex(pos: [ 1, -1]),
            Vertex(pos: [-1, 1]),
            Vertex(pos: [-1, 1]),
            Vertex(pos: [ 1, -1]),
            Vertex(pos: [ 1, 1]),
        ]
        // And copy it to a Metal buffer...
        vertexBuffer = device.makeBuffer(bytes: vertices, length: vertices.count * MemoryLayout<Vertex>.stride, options: [])!
        print("texture size: width: \(width), height: \(height)")
        let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
            pixelFormat: MTLPixelFormat.rgba8Unorm,
            width: width,
            height: height,
            mipmapped: false)
        textureDescriptor.sampleCount = 4
        textureDescriptor.usage = [.renderTarget]
        textureDescriptor.textureType = .type2DMultisample
        textureDescriptor.storageMode = .private
        texture = device.makeTexture(descriptor: textureDescriptor)!
        let resolveTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(
            pixelFormat: MTLPixelFormat.rgba8Unorm,
            width: width,
            height: height,
            mipmapped: false)
        resolveTextureDescriptor.usage = [.renderTarget]
        resolveTexture = device.makeTexture(descriptor: resolveTextureDescriptor)!
    }

    // Create our custom rendering pipeline, which loads shaders using `device`, and outputs to the format of `metalKitView`
    class func buildRenderPipelineWith(device: MTLDevice, metalKitView: MTKView) throws -> MTLRenderPipelineState {
        // Create a new pipeline descriptor
        let pipelineDescriptor = MTLRenderPipelineDescriptor()
        // Setup the shaders in the pipeline
        let library = device.makeDefaultLibrary()
        pipelineDescriptor.vertexFunction = library?.makeFunction(name: "vertexShader")
        pipelineDescriptor.fragmentFunction = library?.makeFunction(name: "fragmentShader")
        // Setup the output pixel format to match the pixel format of the metal kit view
        pipelineDescriptor.colorAttachments[0].pixelFormat = MTLPixelFormat.rgba8Unorm;
        pipelineDescriptor.sampleCount = 4;
        // Compile the configured pipeline descriptor to a pipeline state object
        return try device.makeRenderPipelineState(descriptor: pipelineDescriptor)
    }

    // mtkView will automatically call this function
    // whenever it wants new content to be rendered.
    func draw(in view: MTKView) {
        guard let commandBuffer = commandQueue.makeCommandBuffer() else { return }
        let renderPassDescriptor = MTLRenderPassDescriptor(); // view.currentRenderPassDescriptor else { return }
        renderPassDescriptor.colorAttachments[0].texture = texture;
        renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(1, 1, 1, 1)
        renderPassDescriptor.colorAttachments[0].resolveTexture = resolveTexture;
        renderPassDescriptor.colorAttachments[0].storeAction = .storeAndMultisampleResolve
        guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else { return }
        renderEncoder.setRenderPipelineState(pipelineState)
        renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
        renderEncoder.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: 6)
        renderEncoder.endEncoding()
        commandBuffer.commit()
        commandBuffer.waitUntilCompleted()

        let pixelCount = width * height
        let region = MTLRegionMake2D(0, 0, width, height)
        var pixels = Array<UInt8>(repeating: UInt8(0), count: pixelCount * 4)
        resolveTexture.getBytes(
            &pixels,
            bytesPerRow: width * 4,
            from: region,
            mipmapLevel: 0);
        print("dest size: width: \(width), height: \(height)")
        print("Top Left : \(String(format:"%02X", pixels[0])), \(String(format:"%02X", pixels[1])), \(String(format:"%02X", pixels[2])), \(String(format:"%02X", pixels[3])), expected: (0x80, 0x99, 0xB2, 0xCC)")
        let offset = width * height * 4 - 4;
        print("Bottom Right: \(String(format:"%02X", pixels[offset])), \(String(format:"%02X", pixels[offset + 1])), \(String(format:"%02X", pixels[offset + 2])), \(String(format:"%02X", pixels[offset + 3])), expected: (0x80, 0x99, 0xB2, 0xCC)")
        exit(0)
    }

    // mtkView will automatically call this function
    // whenever the size of the view changes (such as resizing the window).
    func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
    }
}
Shader
#include <metal_stdlib>
#include "ShaderDefinitions.h"
using namespace metal;
struct VertexOut {
    float4 pos [[position]];
};

vertex VertexOut vertexShader(const device Vertex *vertexArray [[buffer(0)]], unsigned int vid [[vertex_id]])
{
    Vertex in = vertexArray[vid];
    VertexOut out;
    out.pos = float4(in.pos.xy, 0, 1);
    return out;
}

fragment float4 fragmentShader()
{
    return float4(0.5, 0.6, 0.7, 0.8);
}
ShaderDefinitions.h
#ifndef ShaderDefinitions_h
#define ShaderDefinitions_h
#include <simd/simd.h>
struct Vertex {
    vector_float2 pos;
};
#endif /* ShaderDefinitions_h */
The output I expect is:
Top Left : 80, 99, B2, CC, expected: (0x80, 0x99, 0xB2, 0xCC)
Bottom Right: 80, 99, B2, CC, expected: (0x80, 0x99, 0xB2, 0xCC)
That is what I get on Intel and M1, but on AMD and Nvidia I get:
Top Left : 00, 00, 00, 00, expected: (0x80, 0x99, 0xB2, 0xCC)
Bottom Right: 00, 00, 00, 00, expected: (0x80, 0x99, 0xB2, 0xCC)
[Intel, Apple M1] - unified memory model
[Nvidia, AMD] - discrete memory model
Understand the Private Mode
A resource with a MTLStorageModePrivate mode is accessible only to the GPU. In a unified memory model, this resource resides in system memory. In a discrete memory model, it resides in video memory.
Use this implementation to copy texture data from a Private Texture to a Shared Buffer.
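For reference, a minimal sketch of that kind of blit copy (it is not taken from the question's code), assuming the device, commandBuffer, resolveTexture, width and height from the question; readbackBuffer is a hypothetical shared buffer created just for the readback:

// Blit the resolve texture into a CPU-visible (shared) buffer;
// read readbackBuffer.contents() after commandBuffer.waitUntilCompleted().
let bytesPerRow = width * 4
let readbackBuffer = device.makeBuffer(length: bytesPerRow * height, options: .storageModeShared)!
guard let blit = commandBuffer.makeBlitCommandEncoder() else { return }
blit.copy(from: resolveTexture,
          sourceSlice: 0,
          sourceLevel: 0,
          sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0),
          sourceSize: MTLSize(width: width, height: height, depth: 1),
          to: readbackBuffer,
          destinationOffset: 0,
          destinationBytesPerRow: bytesPerRow,
          destinationBytesPerImage: bytesPerRow * height)
blit.endEncoding()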
The issue was that I needed to call synchronize in order to make the data available to the CPU.
guard let blitEncoder = commandBuffer.makeBlitCommandEncoder() else { return }
blitEncoder.synchronize(texture: resolveTexture, slice: 0, level: 0);
blitEncoder.endEncoding();
Inserting that code before commandBuffer.commit() in the code from the question solved the issue.
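For clarity, a rough sketch of how the end of draw(in:) from the question looks with that fix in place (on macOS, where the resolve texture ends up with managed storage):

renderEncoder.endEncoding()

// Copy the managed resolve texture back to CPU-accessible memory before reading it.
guard let blitEncoder = commandBuffer.makeBlitCommandEncoder() else { return }
blitEncoder.synchronize(texture: resolveTexture, slice: 0, level: 0)
blitEncoder.endEncoding()

commandBuffer.commit()
commandBuffer.waitUntilCompleted()
// ...resolveTexture.getBytes(...) now sees the rendered pixels.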
Yes, I know about using the CIAreaAverage CIFilter to get the average color of pixels.
I am trying to create an alternative using the Accelerate framework to see if I can come up with something faster.
I am rendering a CIImage to a context. For that purpose I have this CIImage extension...
let device: MTLDevice = MTLCreateSystemDefaultDevice()!
let context = CIContext.init(mtlDevice: device, options: [.workingColorSpace: kCFNull])
let w = self.extent.width
let h = self.extent.height
let size = w * h * 4
var bitmap = [UInt8](repeating: 0, count:Int(size))
context.render(self,
               toBitmap: &bitmap,
               rowBytes: 4 * Int(w),
               bounds: self.extent,
               format: .BGRA8,
               colorSpace: nil)
At this point I have bitmap containing the BGRA bytes interleaved.
To get the average of R, G and B, all I have to do is something like this:
var averageBlue : Int = 0
for x in stride(from:0, through: bitmap.count-4, by: 4) {
    let value = bitmap[Int(x)]
    averageBlue += Int(value)
}
averageBlue /= numberOfPixels
but this for loop is slow as hell, as expected.
I was thinking about using some Accelerate function like
vDSP_meanvD(bitmap, 2, &r, vDSP_Length(numberOfPixels))
but this function requires bitmap to be an UnsafePointer<Double> (an array of Double values)...
I could convert bitmap to that, but the conversion itself would require a for loop, which is slow...
Is there any way to extract those R, G and B pixels and have their individual averages using some accelerate stuff going on?
You can convert bitmap to single-precision floating-point values using vDSP_vfltu8(_:_:_:_:_:):
let bitmap: [UInt8] = [1, 10, 50, 0,
                       2, 20, 150, 5,
                       3, 30, 250, 10]

//Blue
var blueFloats = [Float](repeating: 0, count: bitmap.count/4)

vDSP_vfltu8(bitmap,
            vDSP_Stride(4),
            &blueFloats,
            vDSP_Stride(1),
            vDSP_Length(blueFloats.count))
And then use vDSP_meanv(_:_:_:_:):
var blue: Float = 0

vDSP_meanv(blueFloats,
           vDSP_Stride(1),
           &blue,
           vDSP_Length(blueFloats.count))

print("blue =", blue) //2.0
As for the red channel:
//Red
var redFloats = [Float](repeating: 0, count: bitmap.count/4)

vDSP_vfltu8(UnsafePointer.init(bitmap).advanced(by: 2),
            vDSP_Stride(4),
            &redFloats,
            vDSP_Stride(1),
            vDSP_Length(redFloats.count))

var red: Float = 0

vDSP_meanv(redFloats,
           vDSP_Stride(1),
           &red,
           vDSP_Length(redFloats.count))
print("red =", red) //150.0
As ielyamani said, you can use vDSP_vfltu8 to build that buffer of Float values efficiently.
But rather than striding through that array four times, you can also use cblas_sgemv (or cblas_sgemm) to calculate all four averages in a single call:
let pixelCount: Int = width * height
let channelsPerPixel: Int = 4
let m: Int32 = Int32(channelsPerPixel)
let n: Int32 = Int32(pixelCount)
let lda = m
var a = [Float](repeating: 0, count: pixelCount * channelsPerPixel)
vDSP_vfltu8(pixelBuffer, vDSP_Stride(1), &a, vDSP_Stride(1), vDSP_Length(pixelCount * channelsPerPixel))
var x = [Float](repeating: 1 / Float(pixelCount), count: pixelCount)
var y = [Float](repeating: 0, count: channelsPerPixel)
cblas_sgemv(CblasColMajor, CblasNoTrans, m, n, 1, &a, lda, &x, 1, 1, &y, 1)
print(y)
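A self-contained usage sketch of the same idea, substituting the 3-pixel BGRA sample from the previous answer for pixelBuffer:

import Accelerate

// The same 3-pixel BGRA sample as above.
let sample: [UInt8] = [1, 10, 50, 0,
                       2, 20, 150, 5,
                       3, 30, 250, 10]
let pixels = sample.count / 4
let channels = 4

// Widen every byte to Float in one pass.
var floats = [Float](repeating: 0, count: sample.count)
vDSP_vfltu8(sample, 1, &floats, 1, vDSP_Length(sample.count))

// One matrix-vector product: column j holds pixel j's 4 channels and
// x averages over the pixels, so y receives the 4 per-channel means.
var x = [Float](repeating: 1 / Float(pixels), count: pixels)
var y = [Float](repeating: 0, count: channels)
cblas_sgemv(CblasColMajor, CblasNoTrans, Int32(channels), Int32(pixels),
            1, &floats, Int32(channels), &x, 1, 0, &y, 1)

print(y) // [2.0, 20.0, 150.0, 5.0] -> means of B, G, R, A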
I am trying to read an int (short) from data received over a WebSocket, but something is wrong: when I convert Uint8List -> ByteData -> Uint8List, two extra bytes appear in my array. Can anyone suggest the correct way to get an int from the byte array? (The data is big-endian; my Swift code and the Dart code that writes the data are correct.) Thanks for reading.
I am using 'dart:typed_data' and getting the data from a WebSocket (dart:io).
print(responseData); // UInt8List: [0, 1, 0, 1, 0, 1, 49]
var byteData = responseData.buffer.asByteData();
var array = byteData.buffer.asUint8List();
print(array); // UInt8List: [130, 7, 0, 1, 0, 1, 0, 1, 49]
var shortValue = responseData.buffer.asByteData().getInt16(0);
print(shortValue); // -32249 ( 2 first byte: [0 ,1] so it must be 1 )
There's something else going on, because your code does not add any extra bytes - and actually, it doesn't use array.
This code:
import 'dart:typed_data';

void main() {
  Uint8List responseData = Uint8List.fromList([0, 1, 0, 1, 0, 1, 49]);
  print(responseData); // UInt8List: [0, 1, 0, 1, 0, 1, 49]
  var byteData = responseData.buffer.asByteData();
  //var array = byteData.buffer.asUint8List();
  //print(array); // UInt8List: [130, 7, 0, 1, 0, 1, 0, 1, 49]
  var shortValue = responseData.buffer.asByteData().getInt16(0);
  print(shortValue); // -32249 ( 2 first byte: [0 ,1] so it must be 1 )
}
prints (as expected)
[0, 1, 0, 1, 0, 1, 49]
1
EDIT - as suggested in the comment, the Uint8List you have is in fact a view on a ByteBuffer with a non-zero offset. So, responseData.buffer is that underlying buffer, which includes additional bytes. The simplest solution is to make a copy of the view.
import 'dart:typed_data';

void main() {
  Uint8List original = Uint8List.fromList([130, 7, 0, 1, 0, 1, 0, 1, 49]);
  print(original);

  Uint8List view = Uint8List.view(original.buffer, 2);
  print(view);
  print(view.buffer.lengthInBytes); // prints 9
  print(view.buffer.asByteData().getUint16(0)); // unexpected result

  Uint8List copy = Uint8List.fromList(view);
  print(copy.buffer.lengthInBytes); // prints 7
  print(copy.buffer.asByteData().getUint16(0)); // expected result
}
I'm learning OpenGL on iOS from this guide, and I want to implement everything in Swift. Here is the code where I'm getting a crash:
Memory structures:
private struct Vertex {
    var Position: (GLfloat, GLfloat, GLfloat)
    var Color: (GLfloat, GLfloat, GLfloat, GLfloat)
}

private static var Vertices = [
    Vertex(Position: (1, -1, 0) , Color: (1, 0, 0, 1)),
    Vertex(Position: (1, 1, 0) , Color: (0, 1, 0, 1)),
    Vertex(Position: (-1, 1, 0) , Color: (0, 0, 1, 1)),
    Vertex(Position: (-1, -1, 0), Color: (0, 0, 0, 1))
]

private static var Indices: [GLubyte] = [
    0, 1, 2,
    2, 3, 0
]
Create vertex buffers:
var vertexBuffer = GLuint()
glGenBuffers(1, &vertexBuffer)
glBindBuffer(GLenum(GL_ARRAY_BUFFER), vertexBuffer)
glBufferData(GLenum(GL_ARRAY_BUFFER), Vertices.size, Vertices, GLenum(GL_STATIC_DRAW))
var indexBuffer = GLuint()
glGenBuffers(1, &indexBuffer)
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indexBuffer)
glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), Indices.size, Indices, GLenum(GL_STATIC_DRAW))
Setup memory offsets:
var positionPtr = 0
glVertexAttribPointer(GLuint(positionSlot), 3, GLenum(GL_FLOAT), GLboolean(GL_FALSE), GLsizei(strideofValue(Vertex)), &positionPtr)
var colorPtr = strideof(GLfloat) * 3
glVertexAttribPointer(GLuint(colorSlot), 4, GLenum(GL_FLOAT), GLboolean(GL_FALSE), GLsizei(strideofValue(Vertex)), &colorPtr)
Crash (Trying to draw):
var startPtr = 0
// EXC_BAD_ACCESS code=1 here!
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count / 3), GLenum(GL_UNSIGNED_BYTE), &startPtr)
All shaders are compiled without any errors and glClear() draws well, so I suppose my problem is concerned with VBOs.
And here is how I calculate the size of the arrays:
extension Array
{
    var size: Int {
        get { return self.count * strideof(Element) }
    }
}
UPD: I'm using OpenGLES 2.0.
I learned from your guide about 4 months ago. I converted it from Objective-C to Swift until I could draw the rectangle shown in the picture below.
Now I have run it again, converted to Swift 2.1, and it still works and shows the same image below.
Here is my code (the setupVBOs and render methods, and the struct):
// Track of all our per-vertex information (currently just color and position)
struct Vertex {
    var Position: (CFloat, CFloat, CFloat)
    var Color: (CFloat, CFloat, CFloat, CFloat)
}

// Array with all the info for each vertex
var Vertices = [
    Vertex(Position: (1, -1, 0) , Color: (1, 0, 0, 1)),
    Vertex(Position: (1, 1, 0) , Color: (0, 1, 0, 1)),
    Vertex(Position: (-1, 1, 0) , Color: (0, 0, 1, 1)),
    Vertex(Position: (-1, -1, 0), Color: (0, 0, 0, 1))
]

// Array that gives a list of triangles to create, by specifying the 3 vertices that make up each triangle
var Indices: [GLubyte] = [
    0, 1, 2,
    2, 3, 0
]

//helper extensions to pass arguments to GL land
extension Array {
    func size () -> Int {
        return self.count * sizeofValue(self[0])
    }
}

//The best way to send data to OpenGL is through something called Vertex Buffer Objects.
func setupVBOs() { // VBO : Vertex Buffer Objects.
    //There are two types of vertex buffer objects – one to keep track of the per-vertex data (like we have in the Vertices array), and one to keep track of the indices that make up triangles (like we have in the Indices array).
    glGenBuffers(1, &vertexBuffer)
    glBindBuffer(GLenum(GL_ARRAY_BUFFER), vertexBuffer)
    glBufferData(GLenum(GL_ARRAY_BUFFER), Vertices.count * sizeofValue(Vertices[0]), Vertices, GLenum(GL_STATIC_DRAW)) // send the data over to OpenGL-land.
    glGenBuffers(1, &indexBuffer)
    glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indexBuffer)
    glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), Indices.count * sizeofValue(Indices[0]), Indices, GLenum(GL_STATIC_DRAW))
}

func render() {
    glClearColor(0, 104.0/255.0, 55.0/255.0, 1.0)
    glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
    //glViewport(0, 0, GLint(frame.size.width), GLint(frame.size.height))
    glViewport(0, GLint(frame.size.height/2)/2, GLint(frame.size.width), GLint(frame.size.height/2))
    // feed the correct values to the two input variables for the vertex shader – the Position and SourceColor attributes.
    glVertexAttribPointer(positionSlot, 3, GLenum(GL_FLOAT), GLboolean(UInt8(GL_FALSE)), GLsizei(sizeof(Vertex)), nil)
    glVertexAttribPointer(colorSlot, 4, GLenum(GL_FLOAT), GLboolean(UInt8(GL_FALSE)), GLsizei(sizeof(Vertex)), UnsafePointer<Int>(bitPattern: sizeof(Float) * 3))
    // This actually ends up calling your vertex shader for every vertex you pass in, and then the fragment shader on each pixel to display on the screen.
    glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), nil)
    _context.presentRenderbuffer(Int(GL_RENDERBUFFER))
}
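Presumably the detail that matters for the crash in the question is the final argument of these calls: with the vertex and index buffers bound, this working version passes buffer offsets (nil for offset zero, or an UnsafePointer<Int>(bitPattern:) value) instead of the address of a local Swift variable, and it passes GLsizei(Indices.count) rather than Indices.count / 3 to glDrawElements.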
I'm working with OpenGL ES 2.0 and Swift. I finally got the texture to display, but only one pixel of it is shown!
Can anyone help with this?
My simple vertex shader:
attribute vec4 Position;   // assumed: not shown in the post, but main() below requires it
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;

void main(void) {
    gl_Position = Position;
    TexCoordOut = TexCoordIn;
}
And fragment shader:
varying lowp vec2 TexCoordOut;
uniform sampler2D Texture;

void main(void) {
    gl_FragColor = texture2D(Texture, TexCoordOut);
}
My view controller:
// My Vertex, Vertices and Indices
struct Vertex {
    var Position: (CFloat, CFloat, CFloat)
    var Color: (CFloat, CFloat, CFloat, CFloat)
    var TexCoord: (CFloat, CFloat)
}

var Vertices = [
    Vertex(Position: (1, -1, 0) , Color: (1, 0, 0, 1), TexCoord: (1, 0)),
    Vertex(Position: (1, 1, 0) , Color: (0, 1, 0, 1), TexCoord: (1, 1)),
    Vertex(Position: (-1, 1, 0) , Color: (0, 0, 1, 1), TexCoord: (0, 1)),
    Vertex(Position: (-1, -1, 0), Color: (0, 0, 0, 1), TexCoord: (0, 0))
]

var Indices: [GLubyte] = [
    0, 1, 2,
    2, 3, 0
]

func compileShaders() {
    var vertexShader: GLuint = self.compileShader("Vertex", shaderType: GLenum(GL_VERTEX_SHADER))
    var fragmentShader: GLuint = self.compileShader("Fragment", shaderType: GLenum(GL_FRAGMENT_SHADER))
    var programHandle: GLuint = glCreateProgram()
    glAttachShader(programHandle, vertexShader)
    glAttachShader(programHandle, fragmentShader)
    glLinkProgram(programHandle)
    glUseProgram(programHandle)
    glEnableVertexAttribArray(self.positionSlot)
    glEnableVertexAttribArray(self.colorSlot)
    texCoordSlot = GLuint(glGetAttribLocation(programHandle, "TexCoordIn"))
    glEnableVertexAttribArray(texCoordSlot)
    let pointer = UnsafePointer<Int>(bitPattern: sizeof(Float) * 7)
    glVertexAttribPointer(texCoordSlot, 2, GLenum(GL_FLOAT), GLboolean(UInt8(GL_FALSE)), GLsizei(sizeof(Vertex)), pointer)
    textureUniform = GLuint(glGetUniformLocation(programHandle, "Texture"))
}

func setupVBOs() {
    glGenVertexArraysOES(1, &VAO);
    glBindVertexArrayOES(VAO);
    glGenBuffers(1, &vertexBuffer)
    glBindBuffer(GLenum(GL_ARRAY_BUFFER), vertexBuffer)
    glBufferData(GLenum(GL_ARRAY_BUFFER), Vertices.size(), Vertices, GLenum(GL_STATIC_DRAW))
    glEnableVertexAttribArray(positionSlot)
    glVertexAttribPointer(positionSlot, 3, GLenum(GL_FLOAT), GLboolean(UInt8(GL_FALSE)), GLsizei(sizeof(Vertex)), nil)
    glGenBuffers(1, &indexBuffer)
    glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indexBuffer)
    glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), Indices.size(), Indices, GLenum(GL_STATIC_DRAW))
    glBindBuffer(GLenum(GL_ARRAY_BUFFER), 0)
    glBindVertexArrayOES(0)
}

func render() {
    glBindVertexArrayOES(VAO);
    glActiveTexture(GLenum(GL_TEXTURE0))
    glBindTexture(GLenum(GL_TEXTURE_2D), floorTexture)
    glUniform1i(GLint(textureUniform), GLint(0))
    glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), nil)
    glContext!.presentRenderbuffer(Int(GL_RENDERBUFFER))
    glBindVertexArrayOES(0)
}
I learned the foundations from the great Ray Wenderlich tutorial, but I could not make it work in Swift.