vertex struct
struct Vertex
{
float x, y, z;
float rhw;
D3DCOLOR diffuse;
float u, v;
Vertex(){}
Vertex(float px, float py, D3DCOLOR pdiffuse, float pu, float pv)
{
x = px; y = py; z = 0.0f;
rhw = 1.0f;
diffuse = pdiffuse;
u = pu; v = pv;
}
static const DWORD FVF;
};
const DWORD Vertex::FVF = D3DFVF_XYZRHW | D3DFVF_DIFFUSE | D3DFVF_TEX1;
vertex data
// Fill the quad's four corners with a 20x100 (width x height) rectangle at
// the origin; colour is opaque black and the UVs span the whole texture.
// Order: top-left, top-right, bottom-right, bottom-left.
lpVbData[0] = Vertex(0.0f, 0.0f, 0xFF000000, 0.0f, 0.0f);
lpVbData[1] = Vertex(20.0f, 0.0f, 0xFF000000, 1.0f, 0.0f);
lpVbData[2] = Vertex(20.0f, 100.0f, 0xFF000000, 1.0f, 1.0f);
lpVbData[3] = Vertex(0.0f, 100.0f, 0xFF000000, 0.0f, 1.0f);
My question is
The rect's size should be 20x100 (width x height) pixels, but on the display it is 20x97 (width x height) pixels. Why? Thanks.
The problem has been solved.
The back buffer size did not match the window's client area (I had not accounted for the window's frame), so the back buffer surface was scaled to fit the window and the displayed pixel size was not correct.
Related
CGContextDrawRadialGradient produces a very visible ‘cross’ at the centre of the gradient:
Code (reduced):
// Draws a radial gradient fading from opaque black at the view's centre to
// fully transparent black at endRadius.
- (void)drawRect:(NSRect)dirtyRect {
    CGContextRef context = [[NSGraphicsContext currentContext] graphicsPort];
    size_t numberOfGradientLocations = 2;
    CGFloat startRadius = 0.0f;
    CGFloat endRadius = 30.0f;
    // floorf snaps the centre to an integral point value. NOTE(review): on a
    // Retina backing store this can misalign points vs. pixels and contribute
    // to moiré-like artefacts — confirm against the backing scale factor.
    CGPoint centre = CGPointMake(floorf(self.bounds.size.width / 2), floorf(self.bounds.size.height / 2));
    // Two RGBA stops: {r,g,b,a} at location 0.0, then {r,g,b,a} at 1.0.
    CGFloat gradientColours[8] = {0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f};
    CGFloat gradientLocations[2] = {0.0f, 1.0f};
    CGColorSpaceRef colourspace = CGColorSpaceCreateDeviceRGB();
    CGGradientRef gradient = CGGradientCreateWithColorComponents(colourspace, gradientColours, gradientLocations, numberOfGradientLocations);
    CGContextDrawRadialGradient(context, gradient, centre, startRadius, centre, endRadius, kCGGradientDrawsBeforeStartLocation);
    // Core Foundation "Create" objects are not managed by ARC — release them
    // explicitly, otherwise both objects leak on every redraw.
    CGGradientRelease(gradient);
    CGColorSpaceRelease(colourspace);
}
This happens on both macOS and iOS. Meanwhile, the same kind of gradient renders perfectly in WebKit with CSS (so it’s not some ‘bad display’ issue).
What am I doing wrong? Is there a known way around this?
Here's what I get on iOS; I used the iPhone 7 Plus simulator at 100% so as to make every pixel visible, and it seems completely smooth:
EDIT After some experimentation, I suspect you're seeing a moiré effect caused by misalignment between points and pixels.
I've set up a test project for learning Metal on iOS to do some rendering, but I'm a bit stumped on how to get a prism rotating correctly about its y axis.
Here is the prism rendered without depth testing so all sides can be seen, so it looks like this part is at least correct:
These are the vertices:
// Triangle list for the four slanted sides of the prism/pyramid:
// a shared apex at (0, 1, 0.5) above a square base in the y = -1 plane
// spanning x in [-1, 1] and z in [0, 1]. Three floats (x, y, z) per vertex,
// three vertices per face. Note the base is centred at z = 0.5, not the
// origin — relevant when rotating about the y axis.
static const float vertexData[] = {
// face 1 (base edge z = 0)
0.f, 1.f, 0.5f,
1.f, -1.f, 0.f,
-1.f, -1.f, 0.f,
// face 2 (base edge x = 1)
0.f, 1.f, 0.5f,
1.f, -1.f, 1.f,
1.f, -1.f, 0.f,
// face 3 (base edge z = 1)
0.f, 1.f, 0.5f,
-1.f, -1.f, 1.f,
1.f, -1.f, 1.f,
// face 4 (base edge x = -1)
0.f, 1.f, 0.5f,
-1.f, -1.f, 0.f,
-1.f, -1.f, 1.f
};
However, when I turn on rotation, this is what happens: https://www.dropbox.com/s/esg41j3ibncofox/prism_rotate.mov?dl=0
(The video has depth testing turned on). Why is the prism clipping like that (and/or what is it clipping through)? And it's not rotating about its centre.
Here are how the MVP matrices are being calculated:
// Camera parameters: eye 2 units behind the origin, looking toward +z, y-up.
static simd::float3 viewEye = {0.f, 0.f, -2.f};
static simd::float3 viewCenter = {0.f, 0.f, 1.f};
static simd::float3 viewUp = {0.f, 1.f, 0.f};
static float fov = 45.f;
// Per-frame uniform update: rebuild view/projection/model in the shared buffer.
CGSize size = self.view.bounds.size;
Uniforms *uniforms = (Uniforms *)[uniformBuffer contents];
uniforms->view = AAPL::lookAt(viewEye, viewCenter, viewUp);
uniforms->projection = AAPL::perspective_fov(fov, size.width, size.height, 0.1f, 100.f);
// Rotate about y first, then push the model 12 units down +z in front of the
// camera. NOTE(review): rotation is about the model-space origin; the mesh's
// base is centred at z = 0.5, so it will not spin about its own centre unless
// the geometry is re-centred or a pre-translation is added — confirm.
uniforms->model = AAPL::translate(0.f, 0.f, 12.f) * AAPL::rotate(tAngle, 0.f, 1.f, 0.f);
// Advance the animation angle each frame (units per AAPL::rotate's convention).
tAngle += 0.5f;
The transform, lookAt, rotate, and perspective_fov functions are lifted straight from Apple sample code I used as reference.
Here is the shader:
// Rasterizer input produced by the vertex stage.
typedef struct {
float4 pos [[ position ]]; // clip-space position
half4 color; // per-face colour (three consecutive vertices share one)
float mod; // NOTE(review): never written or read in the visible shaders — unused?
} VertexOut;
// Transforms each vertex to clip space and assigns a flat colour per
// triangle (vertex id / 3 indexes the colour buffer).
vertex VertexOut basic_vertex(const device packed_float3* vertex_array [[ buffer(0) ]],
                              const device packed_float3* colors [[ buffer(1) ]],
                              constant Uniforms& uniform [[ buffer(2) ]],
                              uint vid [[ vertex_id ]],
                              uint iid [[ instance_id ]])
{
    float4 v = float4(vertex_array[vid], 1.f);
    float4x4 mvp_matrix = uniform.projection * uniform.view * uniform.model;
    VertexOut out;
    // Column-vector convention: the matrix goes on the LEFT of the vector.
    // The original `v * mvp_matrix` is equivalent to transpose(mvp) * v and
    // produced the distorted, clipping prism seen during rotation.
    out.pos = mvp_matrix * v;
    uint colorIndex = vid / 3;  // one flat colour per triangle
    out.color = half4(half3(colors[colorIndex]), 1.f);
    return out;
}
// Pass the flat per-face colour straight through to the framebuffer.
fragment half4 basic_fragment(VertexOut f [[ stage_in ]]) {
    half4 shaded = f.color;
    return shaded;
}
Any help/tips would be greatly appreciated.
Sigh..
The problem was in the multiplication order of the MVP matrix with the vertices in the shader.
So this:
out.pos = v * mvp_matrix;
should be:
out.pos = mvp_matrix * v;
For whatever reason, I'm used to row vectors as opposed to column vectors, and had myself convinced I was misunderstanding something about the clip region and/or the matrices themselves..
I am working on a 3D 1st person game, using the gyro in the phone, people can look up and down, side to side etc. I also want to add manual controls to add to this.
This code handles the gyro
// Build the camera attitude from the device's gyro/attitude data.
// Start from identity so the matrix is well-defined even when device
// motion is inactive (the original left it uninitialized in that case,
// and later code reads it unconditionally).
GLKMatrix4 deviceMotionAttitudeMatrix = GLKMatrix4Identity;
if (_cmMotionmanager.deviceMotionActive) {
    CMDeviceMotion *deviceMotion = _cmMotionmanager.deviceMotion;
    // 90° roll about z — presumably to align the attitude reference frame
    // with the screen orientation; confirm against the app's orientation handling.
    GLKMatrix4 baseRotation = GLKMatrix4MakeRotation(M_PI_2, 0.0f, 0.0f, 1.0f);
    // Note: in the simulator this comes back as the zero matrix.
    // On device, this doesn't include the changes required to match screen rotation.
    CMRotationMatrix a = deviceMotion.attitude.rotationMatrix;
    // The CMRotationMatrix elements are fed in transposed order (m21 where
    // m12 would be, etc.), i.e. this builds the inverse/transpose of the
    // attitude — appropriate for a view matrix. TODO(review): confirm intent.
    deviceMotionAttitudeMatrix
    = GLKMatrix4Make(a.m11, a.m21, a.m31, 0.0f,
                     a.m12, a.m22, a.m32, 0.0f,
                     a.m13, a.m23, a.m33, 0.0f,
                     0.0f, 0.0f, 0.0f, 1.0f);
    deviceMotionAttitudeMatrix = GLKMatrix4Multiply(baseRotation, deviceMotionAttitudeMatrix);
    // NSLog(@"%f %f %f %f %f %f %f %f", a.m11, a.m21, a.m31,
    //       a.m12, a.m22, a.m32, a.m23, a.m33);
}
I then want to add the manual values for up/down left/up etc.
I can easily add left right
deviceMotionAttitudeMatrix = GLKMatrix4RotateZ(deviceMotionAttitudeMatrix, (self.rotateLeft / 20));
However when I add up down, it works in north/south direction but east/west direction then rotates on a different axis
deviceMotionAttitudeMatrix = GLKMatrix4RotateY(deviceMotionAttitudeMatrix, (self.rotateTop / 20));
I have tried to multiply the matrices, but that also has the same issue. It feels like I need to merge them, not multiply them.
// Manual look offsets: yaw about z, pitch about y (scaled down by 20).
GLKMatrix4 leftRotation = GLKMatrix4MakeRotation((self.rotateLeft / 20), 0.0f, 0.0f, 1.0f);
GLKMatrix4 TopRotation = GLKMatrix4MakeRotation(-(self.rotateTop / 20), 0.0f, 1.0f, 0.0f);
GLKMatrix4 complete = GLKMatrix4Multiply(TopRotation, leftRotation);
// Right-multiplying applies the manual rotations in the device's rotated
// frame, AFTER the attitude. NOTE(review): that is why "up/down" pitches
// about a different world axis once the device has turned east/west — the
// pitch likely needs to be applied in the world frame (or composed from
// yaw/pitch angles before the attitude) instead. Confirm desired behaviour.
deviceMotionAttitudeMatrix = GLKMatrix4Multiply(deviceMotionAttitudeMatrix,complete);
In our app we'd like to be able to crop, scale and pan an image, and I just can't seem to figure out how I am supposed to draw a cropping region on top of my UIImageView.
I tried messing with coregraphics, I could render a region with a black stroke on my image, but the image would flip. Not only that, but since I had drawn ON the image, I'm afraid that if I were to move and scale it using gestures, the region would be affected too!
A push in the right direction would be much appreciated!
Here's my code, which doesn't really do what I want, to show some research effort.
// Draws a square crop-region outline on top of the image view's image.
// Aspect ratio - currently 1:1
const int arWidth = 1;
const int arHeight = 1;
UIGraphics.BeginImageContext(ImgToCrop.Frame.Size);
var context = UIGraphics.GetCurrentContext();
// Set the line width
context.SetLineWidth(4);
UIColor.Black.SetStroke();
// Our starting points.
float x = 0, y = 0;
// The sizes
float width = ImgToCrop.Frame.Width, height = ImgToCrop.Frame.Height;
// Calculate the geometry
if (arWidth == arHeight) {
    // The aspect ratio is 1:1 — full width, vertically centred.
    width = ImgToCrop.Frame.Width;
    height = width;
    x = 0;
    y = ImgToCrop.Frame.GetMidY() - height / 2;
}
// The crop-region rect to stroke over the image.
var drawRect = new RectangleF(x, y, width, height);
// NOTE(review): CGContext.DrawImage uses a bottom-left-origin coordinate
// system, which is why the image comes out flipped; flip the CTM before
// drawing (as ScaleToSize below does) rather than drawing the region onto
// the image itself.
context.DrawImage(new RectangleF(
    ImgToCrop.Frame.X,
    ImgToCrop.Frame.Y,
    ImgToCrop.Frame.Width,
    ImgToCrop.Frame.Height), ImgToCrop.Image.CGImage);
// Draw it
context.StrokeRect(drawRect);
ImgToCrop.Image = UIGraphics.GetImageFromCurrentImageContext();
// Balance BeginImageContext — the original never closed the image
// context, leaking it on every invocation.
UIGraphics.EndImageContext();
Maybe this will help you:
// Scales `image` to exactly width x height, centre-cropping first when the
// source aspect ratio differs from the target ratio.
// Returns the scaled (and possibly cropped) UIImage.
public static UIImage ScaleToSize (UIImage image, int width, int height)
{
UIGraphics.BeginImageContext (new SizeF (width, height));
CGContext ctx = UIGraphics.GetCurrentContext ();
float ratio = (float) width / (float) height;
// Clip all drawing to the target rect.
ctx.AddRect (new RectangleF (0.0f, 0.0f, width, height));
ctx.Clip ();
var cg = image.CGImage;
float h = cg.Height;
float w = cg.Width;
float ar = w / h;
// NOTE(review): exact float equality — nearly-equal ratios still take the
// crop path; consider a small tolerance if that matters.
if (ar != ratio) {
// Image's aspect ratio is wrong so we'll need to crop
float scaleY = height / h;
float scaleX = width / w;
PointF offset;
SizeF crop;
float size;
if (scaleX >= scaleY) {
// Keep the full source width; crop a vertically centred band.
size = h * (w / width);
offset = new PointF (0.0f, h / 2.0f - size / 2.0f);
crop = new SizeF (w, size);
} else {
// Keep the full source height; crop a horizontally centred band.
size = w * (h / height);
offset = new PointF (w / 2.0f - size / 2.0f, 0.0f);
crop = new SizeF (size, h);
}
// Crop the image and flip it to the correct orientation (otherwise it will be upside down)
ctx.ScaleCTM (1.0f, -1.0f);
// Negative height compensates for the flipped CTM above.
using (var copy = cg.WithImageInRect (new RectangleF (offset, crop))) {
ctx.DrawImage (new RectangleF (0.0f, 0.0f, width, -height), copy);
}
} else {
// Aspect ratios match: UIImage.Draw handles orientation itself.
image.Draw (new RectangleF (0.0f, 0.0f, width, height));
}
UIImage scaled = UIGraphics.GetImageFromCurrentImageContext ();
UIGraphics.EndImageContext ();
return scaled;
}
I'm using Box2D for the first time, and I've set up my shapes via the Hello World tutorial.
I am creating a box as so:
// Create a kinematic 3m x 1m box at (7, 7) in Box2D world coordinates.
// SetAsBox takes HALF-extents, so (1.5, 0.5) yields a 3 x 1 box centred
// on the body's origin.
b2BodyDef bodyDef;
bodyDef.type = b2_kinematicBody;
bodyDef.position.Set(7.0f, 7.0f);
bodyDef.angle = 0;
m_body = m_world->CreateBody(&bodyDef);
b2PolygonShape shape;
shape.SetAsBox(1.5f, 0.5f);
b2FixtureDef fixtureDef;
fixtureDef.shape = &shape;
fixtureDef.density = 1.0f;
m_body->CreateFixture(&fixtureDef);
Now I am ready to render this box, so I call:
// Body origin in world coordinates (metres); for a SetAsBox shape this is
// the box's CENTRE, not its top-left corner.
b2Vec2 pos = m_body->GetPosition();
At this point, I need to call m_renderTarget->SetTransform() using the values of pos, but I can't figure out how to render the box correctly. I have tried:
// Move the render transform to the body's centre (Box2D metres -> pixels at 30 px/m).
m_renderTarget->SetTransform(D2D1::Matrix3x2F::Translation(pos.x * 30, pos.y * 30));
// Centre the 3m x 1m box on the transform origin: left = -right, top = -bottom.
// Drawing from (0,0) (as the original did) offsets the graphic by half the
// box size relative to the physics body, whose origin is its centre.
m_renderTarget->DrawRectangle(D2D1::RectF(-1.5f * 30.0f, -0.5f * 30.0f, 1.5f * 30.0f, 0.5f * 30.0f), m_brush);
And the circle:
// Create a dynamic circle of radius 0.5 m at (7, 1); reuses the bodyDef and
// fixtureDef declared for the box above (density stays 1.0).
bodyDef.type = b2_dynamicBody;
bodyDef.position.Set(7.0f, 1.0f);
m_circleBody = m_world->CreateBody(&bodyDef);
b2CircleShape circleShape;
// Local offset (0,0): the circle is centred on the body origin.
circleShape.m_p.Set(0.0f, 0.0f);
circleShape.m_radius = 0.5f;
fixtureDef.shape = &circleShape;
m_circleBody->CreateFixture(&fixtureDef);
And to render the circle:
// Body centre in world coordinates (metres).
b2Vec2 circlePos = m_circleBody->GetPosition();
// Same metres -> pixels scale as the box: 30 px per metre.
mpRenderTarget->SetTransform(D2D1::Matrix3x2F::Translation(circlePos.x * 30.0f, circlePos.y * 30.0f));
// Physics radius is 0.5 m, so at 30 px/m each ellipse radius is 15 px.
// (The original's 30 px radii drew the circle at twice its physical size.)
mpRenderTarget->DrawEllipse(D2D1::Ellipse(D2D1::Point2F(0.0f, 0.0f), 15.0f, 15.0f), m_brush);
You aren't drawing your rectangle properly centered. The rectangle's center is the top left.
m_renderTarget->DrawRectangle(D2D1::RectF(0.0f, 0.0f, 3.0f * 30.0f, 1.0f * 30.0f), m_brush);
In order to center it properly, you should have Left = -Right, Top = -Bottom like so
m_renderTarget->DrawRectangle(D2D1::RectF(-1.5 * 30.f, -0.5 * 30.f, 1.5f * 30.0f, 0.5f * 30.0f), m_brush);
Here's a diagram explaining why centering is important:
Physically you represent both shapes properly, but graphically you unknowingly added an offset to the rectangle. Also, your scale is off: you assume 1 = 30 pixels for the rectangle, and 0.5 = 30 pixels for the circle. Consistency is key in simulations, so you should lower your D2D1::Ellipse's radii to 15 each.