HLSL: Which DDX/DDY values are expected for TextureCube.SampleGrad()?

I am wondering which DDX DDY values the SampleGrad() function expects for a TextureCube object.
I know that for 2D textures they are the change in the UV coordinates, so I thought that for a cube map they would be the change in the direction vector. However, this does not seem to be the case.
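For reference, this is the 2D behaviour I mean (a minimal sketch; the texture and sampler declarations are just placeholders):
Texture2D colorMap;
SamplerState linearSampler2D;

float4 sample2DBothWays(float2 uv)
{
    // for a Texture2D these two lookups should match, because SampleGrad receives
    // exactly the UV derivatives that Sample derives implicitly
    float4 implicitGrad = colorMap.Sample(linearSampler2D, uv);
    float4 explicitGrad = colorMap.SampleGrad(linearSampler2D, uv, ddx(uv), ddy(uv));
    return explicitGrad; // expected to equal implicitGrad
}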
I get different results if I try to use the Sample function vs. SampleGrad:
Sample:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
return reflectionMap.Sample(linearSampler, reflRay);
SampleGrad:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
float3 dxr = ddx(reflRay);
float3 dyr = ddy(reflRay);
return reflectionMap.SampleGrad(linearSampler, reflRay, dxr, dyr);

I still don't know which values for DDX and DDY are required, but I found an acceptable workaround that computes the level of detail from my gradients. Unfortunately, the quality of this solution is not as good as a real Sample call with anisotropic filtering.
In case anyone needs it:
The computation is described in: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
My HLSL implementation:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
float3 dxr = ddx(reflRay);
float3 dyr = ddy(reflRay);
// cubemap size for lod computation
float reflWidth, reflHeight;
reflectionMap.GetDimensions(reflWidth, reflHeight);
// calculate lod based on raydiffs
float lod = calcLod(getCubeDiff(reflRay, dxr).xy * reflWidth, getCubeDiff(reflRay, dyr).xy * reflHeight);
return reflectionMap.SampleLevel(linearSampler, reflRay, lod).rgb;
Helper functions:
float pow2(float x) {
return x * x;
}
// calculates texture coordinates [-1, 1] for the view direction (xy values must be divided by axisMajorValue for proper [-1, 1] range)
// z coordinate is the faceId
float3 getCubeCoord(float3 viewDir, out float axisMajorValue)
{
// according to dx spec: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#PointSampling
// Choose the largest magnitude component of the input vector. Call this magnitude of this value AxisMajor. In the case of a tie, the following precedence should occur: Z, Y, X.
int axisMajor = 0;
int axisFlip = 0;
axisMajorValue = 0.0;
[unroll] for (int i = 0; i < 3; ++i)
{
if (abs(viewDir[i]) >= axisMajorValue)
{
axisMajor = i;
axisFlip = viewDir[i] < 0.0f ? 1 : 0;
axisMajorValue = abs(viewDir[i]);
}
}
int faceId = axisMajor * 2 + axisFlip;
// Select and mirror the minor axes as defined by the TextureCube coordinate space. Call this new 2d coordinate Position.
int axisMinor1 = axisMajor == 0 ? 2 : 0; // first coord is x or z
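// axisMinor2 is the remaining axis index (the three axis indices always sum to 0 + 1 + 2 = 3)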
int axisMinor2 = 3 - axisMajor - axisMinor1;
// Project the coordinate onto the cube by dividing the components Position by AxisMajor.
//float u = viewDir[axisMinor1] / axisMajorValue;
//float v = -viewDir[axisMinor2] / axisMajorValue;
// don't project for getCubeDiff function!
float u = viewDir[axisMinor1];
float v = -viewDir[axisMinor2];
switch (faceId)
{
case 0:
case 5:
u *= -1.0f;
break;
case 2:
v *= -1.0f;
break;
}
return float3(u, v, float(faceId));
}
float3 getCubeDiff(float3 ray, float3 diff)
{
// from: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
// Using TC, determine which component is of the largest magnitude, as when calculating the texel location. If any of the components are equivalent, precedence is as follows: Z, Y, X. The absolute value of this will be referred to as AxisMajor.
// select and mirror the minor axes of TC as defined by the TextureCube coordinate space to generate TC'.uv
float axisMajor;
float3 tuv = getCubeCoord(ray, axisMajor);
// select and mirror the minor axes of the partial derivative vectors as defined by the TextureCube coordinate space, generating 2 new partial derivative vectors dX'.uv & dY'.uv.
float derivateMajor;
float3 duv = getCubeCoord(diff, derivateMajor);
// Calculate 2 new dX and dY vectors for future calculations as follows:
// dX.uv = (AxisMajor*dX'.uv - TC'.uv*DerivativeMajorX)/(AxisMajor*AxisMajor)
float3 res;
res.z = 0.0;
res.xy = (axisMajor * duv.xy - tuv.xy * derivateMajor) / (axisMajor * axisMajor);
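// halve because the projected face coordinates span [-1, 1] (a range of 2), while the texel scaling applied by the caller assumes a [0, 1] range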
return res * 0.5;
}
// dx, dy in pixel coordinates
float calcLod(float2 dX, float2 dY)
{
// from: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
float A = pow2(dX.y) + pow2(dY.y);
float B = -2.0 * (dX.x * dX.y + dY.x * dY.y);
float C = pow2(dX.x) + pow2(dY.x);
float F = pow2(dX.x * dY.y - dY.x * dX.y);
float p = A - C;
float q = A + C;
float t = sqrt(pow2(p) + pow2(B));
float lengthX = sqrt(abs(F * (t+p) / ( t * (q+t))) + abs(F * (t-p) / ( t * (q+t))));
float lengthY = sqrt(abs(F * (t-p) / ( t * (q-t))) + abs(F * (t+p) / ( t * (q-t))));
return log2(max(lengthX,lengthY));
}
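For comparison, a much simpler isotropic estimate (just a sketch, not the spec formula) takes the longer of the two gradient vectors in texel units; it is cheaper but ignores anisotropy entirely:
// dX, dY in pixel coordinates, same inputs as calcLod above
float calcLodIsotropic(float2 dX, float2 dY)
{
    return log2(max(length(dX), length(dY)));
}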

Related

Metal equivalent to OpenGL mix

I'm trying to understand what the equivalent of the OpenGL mix function is in Metal. This is the OpenGL code I'm trying to convert:
float udRoundBox( vec2 p, vec2 b, float r )
{
return length(max(abs(p)-b+r,0.0))-r;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
// setup
float t = 0.2 + 0.2 * sin(mod(iTime, 2.0 * PI) - 0.5 * PI);
float iRadius = min(iResolution.x, iResolution.y) * (0.05 + t);
vec2 halfRes = 0.5 * iResolution.xy;
// compute box
float b = udRoundBox( fragCoord.xy - halfRes, halfRes, iRadius );
// colorize (red / black )
vec3 c = mix( vec3(1.0,0.0,0.0), vec3(0.0,0.0,0.0), smoothstep(0.0,1.0,b) );
fragColor = vec4( c, 1.0 );
}
I was able to convert part of it so far:
float udRoundBox( float2 p, float2 b, float r )
{
return length(max(abs(p)-b+r,0.0))-r;
}
float4 cornerRadius(sampler_h src) {
float2 greenCoord = src.coord(); // this is already in relative coords; no need to divide by image size
float t = 0.5;
float iRadius = min(greenCoord.x, greenCoord.y) * (t);
float2 halfRes = float2(greenCoord.x * 0.5, greenCoord.y * 0.5);
float b = udRoundBox( float2(greenCoord.x - halfRes.x, greenCoord.y - halfRes.y), halfRes, iRadius );
float3 c = mix(float3(1.0,0.0,0.0), float3(0.0,0.0,0.0), smoothstep(0.0,1.0,b) );
return float4(c, 1.0);
}
But it's producing a green screen. I'm trying to achieve a corner radius on a video like so:
The mix function is an implementation of linear interpolation, more frequently referred to as a Lerp function.
You can use linear interpolation where you have a value, let's say t and you want to know how that value maps within a certain range.
For example if I have three values:
a = 0
b = 1
and
t = 0.5
I could call mix(a,b,t) and my result would be 0.5. That is because the mix function expects a start range value, an end range value and a factor by which to interpolate, so I get 0.5 which is halfway between 0 and 1.
Looking at the documentation, Metal has an implementation of mix that does linear interpolation.
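Under the hood it is just this formula; mix in GLSL/Metal and lerp in HLSL are the same operation (a minimal sketch):
// mix(a, b, t) == lerp(a, b, t) == a + t * (b - a)
float3 myMix(float3 a, float3 b, float t)
{
    return a + t * (b - a); // t = 0 gives a, t = 1 gives b, t = 0.5 gives the midpoint
}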
The problem is that greenCoord (which was only a good variable name for the other question you asked, by the way) is the relative coordinate of the current pixel and has nothing to do with the absolute input resolution.
If you want a replacement for your iResolution, use src.size() instead.
And it seems you need your input coordinates in absolute (pixel) units. You can achieve that by adding a destination parameter to the inputs of your kernel like so:
float4 cornerRadius(sampler src, destination dest) {
const float2 destCoord = dest.coord(); // pixel position in the output buffer in absolute coordinates
const float2 srcSize = src.size();
const float t = 0.5;
const float radius = min(srcSize.x, srcSize.y) * t;
const float2 halfRes = 0.5 * srcSize;
const float b = udRoundBox(destCoord - halfRes, halfRes, radius);
const float3 c = mix(float3(1.0,0.0,0.0), float3(0.0,0.0,0.0), smoothstep(0.0,1.0,b) );
return float4(c, 1.0);
}

OpenCV + OpenGL Using solvePnP camera pose - object is offset from detected marker

I have a problem in my iOS application where I attempt to obtain a view matrix using solvePnP and render a 3D cube using modern OpenGL. While my code attempts to render the cube directly on top of the detected marker, it seems to render with a certain offset from the marker (see video for example):
https://www.youtube.com/watch?v=HhP5Qr3YyGI&feature=youtu.be
(On the bottom right of the image you can see an OpenCV render of the homography around the tracked marker; the rest of the screen is an OpenGL render of the camera input frame and a 3D cube at location (0,0,0).)
The cube rotates and translates correctly whenever I move the marker, though it is very telling that there is some difference in the scale of the translations (i.e., if I move my marker 5 cm in the real world, it hardly moves by 1 cm on screen).
These are what I believe to be the relevant parts of the code where the error could come from:
Extracting the view matrix from the homography:
AVCaptureDevice *deviceInput = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceFormat *format = deviceInput.activeFormat;
CMFormatDescriptionRef fDesc = format.formatDescription;
CGSize dim = CMVideoFormatDescriptionGetPresentationDimensions(fDesc, true, true);
const float cx = float(dim.width) / 2.0;
const float cy = float(dim.height) / 2.0;
const float HFOV = format.videoFieldOfView;
const float VFOV = ((HFOV)/cx)*cy;
const float fx = abs(float(dim.width) / (2 * tan(HFOV / 180 * float(M_PI) / 2)));
const float fy = abs(float(dim.height) / (2 * tan(VFOV / 180 * float(M_PI) / 2)));
Mat camIntrinsic = Mat::zeros(3, 3, CV_64F);
camIntrinsic.at<double>(0, 0) = fx;
camIntrinsic.at<double>(0, 2) = cx;
camIntrinsic.at<double>(1, 1) = fy;
camIntrinsic.at<double>(1, 2) = cy;
camIntrinsic.at<double>(2, 2) = 1.0;
std::vector<cv::Point3f> object3dPoints;
object3dPoints.push_back(cv::Point3f(-0.5f,-0.5f,0));
object3dPoints.push_back(cv::Point3f(+0.5f,-0.5f,0));
object3dPoints.push_back(cv::Point3f(+0.5f,+0.5f,0));
object3dPoints.push_back(cv::Point3f(-0.5f,+0.5f,0));
cv::Mat raux,taux;
cv::Mat Rvec, Tvec;
cv::solvePnP(object3dPoints, mNewImageBounds, camIntrinsic, Mat(),raux,taux); //mNewImageBounds are the 4 corner of the homography detected by perspectiveTransform (the green outline seen in the image)
raux.convertTo(Rvec,CV_32F);
taux.convertTo(Tvec ,CV_64F);
Mat Rot(3,3,CV_32FC1);
Rodrigues(Rvec, Rot);
// [R | t] matrix
Mat_<double> para = Mat_<double>::eye(4,4);
Rot.convertTo(para(cv::Rect(0,0,3,3)),CV_64F);
Tvec.copyTo(para(cv::Rect(3,0,1,3)));
Mat cvToGl = Mat::zeros(4, 4, CV_64F);
cvToGl.at<double>(0, 0) = 1.0f;
cvToGl.at<double>(1, 1) = -1.0f; // Invert the y axis
cvToGl.at<double>(2, 2) = -1.0f; // invert the z axis
cvToGl.at<double>(3, 3) = 1.0f;
para = cvToGl * para;
Mat_<double> modelview_matrix;
Mat(para.t()).copyTo(modelview_matrix); // transpose to col-major for OpenGL
glm::mat4 openGLViewMatrix;
for(int col = 0; col < modelview_matrix.cols; col++)
{
for(int row = 0; row < modelview_matrix.rows; row++)
{
openGLViewMatrix[col][row] = modelview_matrix.at<double>(col,row);
}
}
I made sure the camera intrinsic matrix contains correct values, and I believe the portion which converts the OpenCV Mat to an OpenGL view matrix is correct, as the cube translates and rotates in the right directions.
Once the view matrix is calculated, I use it to draw the cube as follows:
_projectionMatrix = glm::perspective<float>(radians(60.0f), fabs(view.bounds.size.width / view.bounds.size.height), 0.1f, 100.0f);
_cube_ModelMatrix = glm::translate(glm::vec3(0,0,0));
const mat4 MVP = _projectionMatrix * openGLViewMatrix * _cube_ModelMatrix;
glUniformMatrix4fv(glGetUniformLocation(_cube_program, "ModelMatrix"), 1, GL_FALSE, value_ptr(MVP));
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, BUFFER_OFFSET(0));
Is anyone able to spot my error?
You should create the perspective matrix as explained here: http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl
Here is some quick code:
const float fx = intrinsicParams(0, 0); // Focal length in x axis
const float fy = intrinsicParams(1, 1); // Focal length in y axis
const float cx = intrinsicParams(0, 2); // Principal point x
const float cy = intrinsicParams(1, 2); // Principal point y
projectionMatrix(0, 0) = 2.0f * fx;
projectionMatrix(0, 1) = 0.0f;
projectionMatrix(0, 2) = 0.0f;
projectionMatrix(0, 3) = 0.0f;
projectionMatrix(1, 0) = 0.0f;
projectionMatrix(1, 1) = 2.0f * fy;
projectionMatrix(1, 2) = 0.0f;
projectionMatrix(1, 3) = 0.0f;
projectionMatrix(2, 0) = 2.0f * cx - 1.0f;
projectionMatrix(2, 1) = 2.0f * cy - 1.0f;
projectionMatrix(2, 2) = -(far + near) / (far - near);
projectionMatrix(2, 3) = -1.0f;
projectionMatrix(3, 0) = 0.0f;
projectionMatrix(3, 1) = 0.0f;
projectionMatrix(3, 2) = -2.0f * far * near / (far - near);
projectionMatrix(3, 3) = 0.0f;
For more information about intrinsic matrix: http://ksimek.github.io/2013/08/13/intrinsic

DX 11 Compute Shader/SharpDX Deferred Tiled lighting, Point light problems

I have just finished porting my engine from XNA to SharpDX (DX11).
Everything has been going really well, and I had conquered most of my issues without having to ask for help until now. I'm really stuck; maybe I just need another set of eyes to look over my code, but here it is.
I'm implementing tile-based lighting (point lights only for now), basing my code on the Intel sample because it's not as messy as the ATI one.
My problem is that the lights move with the camera. I have looked all over the place for a fix and I have tried everything (am I crazy?).
I just made sure all my normal and light vectors are in view space and normalized (still the same).
I have tried the inverse view, the inverse projection, a mix of both, and a few other bits from around the net, but I can't fix it.
So here is my CPU code:
Dim viewSpaceLPos As Vector3 = Vector3.Transform(New Vector3(pointlight.PosRad.X, pointlight.PosRad.Y, pointlight.PosRad.Z), Engine.Camera.EyeTransform)
Dim lightMatrix As Matrix = Matrix.Scaling(pointlight.PosRad.W) * Matrix.Translation(New Vector3(pointlight.PosRad.X, pointlight.PosRad.Y, pointlight.PosRad.Z))
Here is my CS shader code:
[numthreads(GROUP_WIDTH, GROUP_HEIGHT, GROUP_DEPTH)]
void TileLightingCS(uint3 dispatchThreadID : SV_DispatchThreadID, uint3 GroupID : SV_GroupID, uint3 GroupThreadID : SV_GroupThreadID)
{
int2 globalCoords = dispatchThreadID.xy;
uint groupIndex = GroupThreadID.y * GROUP_WIDTH + GroupThreadID.x;
float minZSample = FrameBufferCamNearFar.x;
float maxZSample = FrameBufferCamNearFar.y;
float2 gbufferDim;
DepthBuffer.GetDimensions(gbufferDim.x, gbufferDim.y);
float2 screenPixelOffset = float2(2.0f, -2.0f) / gbufferDim;
float2 positionScreen = (float2(globalCoords)+0.5f) * screenPixelOffset.xy + float2(-1.0f, 1.0f);
float depthValue = DepthBuffer[globalCoords].r;
float3 positionView = ComputePositionViewFromZ(positionScreen, Projection._43 / (depthValue - Projection._33));
// Avoid shading skybox/background or otherwise invalid pixels
float viewSpaceZ = positionView.z;
bool validPixel = viewSpaceZ >= FrameBufferCamNearFar.x && viewSpaceZ < FrameBufferCamNearFar.y;
[flatten] if (validPixel)
{
minZSample = min(minZSample, viewSpaceZ);
maxZSample = max(maxZSample, viewSpaceZ);
}
// How many total lights?
uint totalLights, dummy;
InputBuffer.GetDimensions(totalLights, dummy);
// Initialize shared memory light list and Z bounds
if (groupIndex == 0)
{
sTileNumLights = 0;
sMinZ = 0x7F7FFFFF; // Max float
sMaxZ = 0;
}
GroupMemoryBarrierWithGroupSync();
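// note: asuint lets InterlockedMin/Max work on the float depth values; for non-negative
// IEEE-754 floats, the unsigned-integer ordering matches the float ordering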
if (maxZSample >= minZSample) {
InterlockedMin(sMinZ, asuint(minZSample));
InterlockedMax(sMaxZ, asuint(maxZSample));
}
GroupMemoryBarrierWithGroupSync();
float minTileZ = asfloat(sMinZ);
float maxTileZ = asfloat(sMaxZ);
// Work out scale/bias from [0, 1]
float2 tileScale = float2(FrameBufferCamNearFar.zw) * rcp(float(2 * GROUP_WIDTH));
float2 tileBias = tileScale - float2(GroupID.xy);
// Now work out composite projection matrix
// Relevant matrix columns for this tile frusta
float4 c1 = float4(Projection._11 * tileScale.x, 0.0f, tileBias.x, 0.0f);
float4 c2 = float4(0.0f, -Projection._22 * tileScale.y, tileBias.y, 0.0f);
float4 c4 = float4(0.0f, 0.0f, 1.0f, 0.0f);
// Derive frustum planes
float4 frustumPlanes[6];
// Sides
frustumPlanes[0] = c4 - c1;
frustumPlanes[1] = c4 + c1;
frustumPlanes[2] = c4 - c2;
frustumPlanes[3] = c4 + c2;
// Near/far
frustumPlanes[4] = float4(0.0f, 0.0f, 1.0f, -minTileZ);
frustumPlanes[5] = float4(0.0f, 0.0f, -1.0f, maxTileZ);
// Normalize frustum planes (near/far already normalized)
[unroll] for (uint i = 0; i < 4; ++i)
{
frustumPlanes[i] *= rcp(length(frustumPlanes[i].xyz));
}
// Cull lights for this tile
for (uint lightIndex = groupIndex; lightIndex < totalLights; lightIndex += (GROUP_WIDTH * GROUP_HEIGHT))
{
PointLight light = InputBuffer[lightIndex];
float3 lightVS = light.PosRad.xyz;// mul(float4(light.Pos.xyz, 1), View);
// Cull: point light sphere vs tile frustum
bool inFrustum = true;
[unroll]
for (uint i = 0; i < 6; ++i)
{
float d = dot(frustumPlanes[i], float4(lightVS, 1.0f));
inFrustum = inFrustum && (d >= -light.PosRad.w);
}
[branch]
if (inFrustum)
{
uint listIndex;
InterlockedAdd(sTileNumLights, 1, listIndex);
sTileLightIndices[listIndex] = lightIndex;
}
}
GroupMemoryBarrierWithGroupSync();
uint numLights = sTileNumLights;
if (all(globalCoords < FrameBufferCamNearFar.zw))
{
float4 NormalMap = NormalBuffer[globalCoords];
float3 normal = DecodeNormal(NormalMap);
if (numLights > 0)
{
float3 lit = float3(0.0f, 0.0f, 0.0f);
for (uint tileLightIndex = 0; tileLightIndex < numLights; ++tileLightIndex)
{
PointLight light = InputBuffer[sTileLightIndices[tileLightIndex]];
float3 lDir = light.PosRad.xyz - positionView;
lDir = normalize(lDir);
float3 nl = saturate(dot(lDir, normal));
lit += ((light.Color.xyz * light.Color.a) * nl) * 0.1f;
}
PointLightColor[globalCoords] = float4(lit, 1);
}
else
{
PointLightColor[globalCoords] = 0;
}
}
GroupMemoryBarrierWithGroupSync();
}
So I know the culling works, because lights are being drawn; they just move with the camera.
Could it be a handedness issue?
Am I setting my CPU light code up right?
Have I messed my spaces up?
What am I missing?
Am I reconstructing my position from depth wrong? (I don't think it's this, because the culling works.)
P.S. I write depth out like this:
VS shader
float4 viewSpacePos = mul(float4(input.Position,1), WV);
output.Depth=viewSpacePos.z ;
PS Shader
-input.Depth.x / FarClip
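For completeness, ComputePositionViewFromZ isn't shown above; it's essentially the helper from the Intel sample, roughly:
float3 ComputePositionViewFromZ(float2 positionScreen, float viewSpaceZ)
{
    // undo the projection scale on x/y, then push the ray out to the view-space depth
    float2 screenSpaceRay = float2(positionScreen.x / Projection._11,
                                   positionScreen.y / Projection._22);
    float3 positionView;
    positionView.z = viewSpaceZ;
    positionView.xy = screenSpaceRay * positionView.z;
    return positionView;
}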

Un-distort raw images received from the Leap Motion cameras

I've been working with the Leap for a long time now. The 2.1+ SDK version allows us to access the cameras and get raw images. I want to use those images with OpenCV for square/circle detection and such... the problem is I can't get those images undistorted. I read the docs, but don't quite get what they mean. Here's one thing I need to understand properly before going forward:
distortion_data_ = image.distortion();
for (int d = 0; d < image.distortionWidth() * image.distortionHeight(); d += 2)
{
float dX = distortion_data_[d];
float dY = distortion_data_[d + 1];
if(!((dX < 0) || (dX > 1)) && !((dY < 0) || (dY > 1)))
{
//what do i do now to undistort the image?
}
}
data = image.data();
mat.put(0, 0, data);
//Imgproc.Canny(mat, mat, 100, 200);
//mat = findSquare(mat);
ok.showImage(mat);
In the docs it says something like this:
"The calibration map can be used to correct image distortion due to lens curvature and other imperfections. The map is a 64x64 grid of points. Each point consists of two 32-bit values..." (the rest is on the dev website).
Can someone explain this in detail please, or just post the Java code to undistort the images and give me an output Mat image so I may continue processing it? (I'd still prefer a good explanation if possible.)
OK, I have no Leap camera to test all this, but this is how I understand the documentation:
The calibration map does not hold offsets but full point positions. An entry says where the pixel has to be placed instead. Those values are mapped between 0 and 1, which means that you have to multiply them by your real image width and height.
What isn't explained explicitly is how your pixel positions are mapped to the 64 x 64 positions of the calibration map. I assume it's proportional: 640 pixels of width are mapped to 64, and 240 pixels of height are mapped to 64.
So in general, to move from one of your 640 x 240 pixel positions (pX, pY) to the undistorted position you will:
compute the corresponding position in the calibration map: float cX = pX/640.0f * 64.0f; float cY = pY/240.0f * 64.0f;
(cX, cY) is now the location of that pixel in the calibration map. You will have to interpolate between two pixel locations, but for now I will only explain how to go on for a discrete location in the calibration map: (cX', cY') = rounded locations of (cX, cY).
read the x and y values out of the calibration map: dX, dY as in the documentation. You have to compute the location in the array by: d = cY'*calibrationMapWidth*2 + cX'*2;
dX and dY are values between 0 and 1 (if not, don't undistort this point because there is no undistortion available). To find the pixel location in your real image, multiply by the image size: uX = dX*640; uY = dY*240;
set your pixel to the undistorted value: undistortedImage(pX,pY) = distortedImage(uX,uY);
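Put together, a minimal sketch of the discrete lookup (illustrative names only, written as a small shader-style helper):
// calibMap holds 64*64 (dX, dY) float pairs in row-major order; cX, cY are the rounded
// calibration-map coordinates computed from the pixel position
float2 lookupCalibMap(StructuredBuffer<float> calibMap, int cX, int cY)
{
    int d = cY * 64 * 2 + cX * 2; // two floats per grid point
    return float2(calibMap[d], calibMap[d + 1]); // (dX, dY); only usable if both are in [0, 1]
}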
But you don't have discrete point positions in your calibration map, so you have to interpolate. I'll give you an example:
Let (cX, cY) = (13.7, 10.4).
so you read from your calibration map four values:
calibMap(13,10) = (dX1, dY1)
calibMap(14,10) = (dX2, dY2)
calibMap(13,11) = (dX3, dY3)
calibMap(14,11) = (dX4, dY4)
Now your undistorted pixel position for (13.7, 10.4) is (multiply each dX by 640 and each dY by 240 to get uX1, uY1, uX2, etc.):
// interpolate in x direction first:
float tmpUX1 = uX1*0.3 + uX2*0.7
float tmpUY1 = uY1*0.3 + uY2*0.7
float tmpUX2 = uX3*0.3 + uX4*0.7
float tmpUY2 = uY3*0.3 + uY4*0.7
// now interpolate in y direction
float combinedX = tmpUX1*0.6 + tmpUX2*0.4
float combinedY = tmpUY1*0.6 + tmpUY2*0.4
and your undistorted point is:
undistortedImage(pX,pY) = distortedImage(floor(combinedX+0.5),floor(combinedY+0.5)); or interpolate pixel values there too.
Hope this helps for a basic understanding. I'll try to add OpenCV remap code soon! The only point that's unclear to me is whether the mapping between pX/Y and cX/Y is correct, because that's not explicitly explained in the documentation.
Here is some code. You can skip the first part, where I am faking a distortion and creating the map, which is your initial state.
With OpenCV it is simple: just resize the calibration map to your image size and multiply all the values by your resolution. The nice thing is that OpenCV performs the interpolation "automatically" while resizing.
int main()
{
cv::Mat input = cv::imread("../Data/Lenna.png");
cv::Mat distortedImage = input.clone();
// now i fake some distortion:
cv::Mat transformation = cv::Mat::eye(3,3,CV_64FC1);
transformation.at<double>(0,0) = 2.0;
cv::warpPerspective(input,distortedImage,transformation,input.size());
cv::imshow("distortedImage", distortedImage);
//cv::imwrite("../Data/LenaFakeDistorted.png", distortedImage);
// now fake a calibration map corresponding to my faked distortion:
const unsigned int cmWidth = 64;
const unsigned int cmHeight = 64;
// compute the calibration map by transforming image locations to values between 0 and 1 for legal positions.
float calibMap[cmWidth*cmHeight*2];
for(unsigned int y = 0; y < cmHeight; ++y)
for(unsigned int x = 0; x < cmWidth; ++x)
{
float xx = (float)x/(float)cmWidth;
xx = xx*2.0f; // this is from my fake distortion... it gives some values bigger than 1
float yy = (float)y/(float)cmHeight;
calibMap[y*cmWidth*2+ 2*x] = xx;
calibMap[y*cmWidth*2+ 2*x+1] = yy;
}
// NOW you have the initial situation of your scenario: calibration map and distorted image...
// compute the image locations of calibration map values:
cv::Mat cMapMatX = cv::Mat(cmHeight, cmWidth, CV_32FC1);
cv::Mat cMapMatY = cv::Mat(cmHeight, cmWidth, CV_32FC1);
for(int j=0; j<cmHeight; ++j)
for(int i=0; i<cmWidth; ++i)
{
cMapMatX.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i];
cMapMatY.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i+1];
}
//cv::imshow("mapX",cMapMatX);
//cv::imshow("mapY",cMapMatY);
// interpolate those values for each of your original images pixel:
// here I use linear interpolation, you could use cubic or other interpolation too.
cv::resize(cMapMatX, cMapMatX, distortedImage.size(), 0,0, CV_INTER_LINEAR);
cv::resize(cMapMatY, cMapMatY, distortedImage.size(), 0,0, CV_INTER_LINEAR);
// now the calibration map has the size of your original image, but its values are still between 0 and 1 (for legal positions)
// so scale to image size:
cMapMatX = distortedImage.cols * cMapMatX;
cMapMatY = distortedImage.rows * cMapMatY;
// now create undistorted image:
cv::Mat undistortedImage = cv::Mat(distortedImage.rows, distortedImage.cols, CV_8UC3);
undistortedImage.setTo(cv::Vec3b(0,0,0)); // initialize black
//cv::imshow("undistorted", undistortedImage);
for(int j=0; j<undistortedImage.rows; ++j)
for(int i=0; i<undistortedImage.cols; ++i)
{
cv::Point undistPosition;
undistPosition.x = (cMapMatX.at<float>(j,i)); // this truncates the position; maybe you want interpolation instead
undistPosition.y =(cMapMatY.at<float>(j,i));
if(undistPosition.x >= 0 && undistPosition.x < distortedImage.cols
&& undistPosition.y >= 0 && undistPosition.y < distortedImage.rows)
{
undistortedImage.at<cv::Vec3b>(j,i) = distortedImage.at<cv::Vec3b>(undistPosition);
}
}
cv::imshow("undistorted", undistortedImage);
cv::waitKey(0);
//cv::imwrite("../Data/LenaFakeUndistorted.png", undistortedImage);
}
cv::Mat SelfDescriptorDistances(cv::Mat descr)
{
cv::Mat selfDistances = cv::Mat::zeros(descr.rows,descr.rows, CV_64FC1);
for(int keyptNr = 0; keyptNr < descr.rows; ++keyptNr)
{
for(int keyptNr2 = 0; keyptNr2 < descr.rows; ++keyptNr2)
{
double euclideanDistance = 0;
for(int descrDim = 0; descrDim < descr.cols; ++descrDim)
{
double tmp = descr.at<float>(keyptNr,descrDim) - descr.at<float>(keyptNr2, descrDim);
euclideanDistance += tmp*tmp;
}
euclideanDistance = sqrt(euclideanDistance);
selfDistances.at<double>(keyptNr, keyptNr2) = euclideanDistance;
}
}
return selfDistances;
}
I use this as input and fake a remap/distortion from which I compute my calib mat:
input:
faked distortion:
used the map to undistort the image:
TODO: after those computations, use an OpenCV remap with those values to perform faster remapping.
Here's an example of how to do it without using OpenCV. The following seems to be faster than using the Leap::Image::warp() method (probably due to the additional function-call overhead when using warp()):
float destinationWidth = 320;
float destinationHeight = 120;
unsigned char destination[(int)destinationWidth][(int)destinationHeight];
//define needed variables outside the inner loop
float calX, calY, weightX, weightY, dX1, dX2, dX3, dX4, dY1, dY2, dY3, dY4, dX, dY;
int x1, x2, y1, y2, denormalizedX, denormalizedY;
int x, y;
const unsigned char* raw = image.data();
const float* distortion_buffer = image.distortion();
//Local variables for values needed in loop
const int distortionWidth = image.distortionWidth();
const int width = image.width();
const int height = image.height();
for (x = 0; x < destinationWidth; x++) {
for (y = 0; y < destinationHeight; y++) {
//Calculate the position in the calibration map (still with a fractional part)
calX = 63 * x/destinationWidth;
calY = 63 * y/destinationHeight;
//Save the fractional part to use as the weight for interpolation
weightX = calX - truncf(calX);
weightY = calY - truncf(calY);
//Get the x,y coordinates of the closest calibration map points to the target pixel
x1 = calX; //Note truncation to int
y1 = calY;
x2 = x1 + 1;
y2 = y1 + 1;
//Look up the x and y values for the 4 calibration map points around the target
// (x1, y1) .. .. .. (x2, y1)
// .. ..
// .. (x, y) ..
// .. ..
// (x1, y2) .. .. .. (x2, y2)
dX1 = distortion_buffer[x1 * 2 + y1 * distortionWidth];
dX2 = distortion_buffer[x2 * 2 + y1 * distortionWidth];
dX3 = distortion_buffer[x1 * 2 + y2 * distortionWidth];
dX4 = distortion_buffer[x2 * 2 + y2 * distortionWidth];
dY1 = distortion_buffer[x1 * 2 + y1 * distortionWidth + 1];
dY2 = distortion_buffer[x2 * 2 + y1 * distortionWidth + 1];
dY3 = distortion_buffer[x1 * 2 + y2 * distortionWidth + 1];
dY4 = distortion_buffer[x2 * 2 + y2 * distortionWidth + 1];
//Bilinear interpolation of the looked-up values:
// X value
dX = dX1 * (1 - weightX) * (1- weightY) + dX2 * weightX * (1 - weightY) + dX3 * (1 - weightX) * weightY + dX4 * weightX * weightY;
// Y value
dY = dY1 * (1 - weightX) * (1- weightY) + dY2 * weightX * (1 - weightY) + dY3 * (1 - weightX) * weightY + dY4 * weightX * weightY;
// Reject points outside the range [0..1]
if((dX >= 0) && (dX <= 1) && (dY >= 0) && (dY <= 1)) {
//Denormalize from [0..1] to [0..width] or [0..height]
denormalizedX = dX * width;
denormalizedY = dY * height;
//look up the brightness value for the target pixel
destination[x][y] = raw[denormalizedX + denormalizedY * width];
} else {
destination[x][y] = -1;
}
}
}

Pixel shader performance on Xbox

I've got a pixel shader (below) that I'm using with XNA. On my laptop (crappy graphics card) it runs a little jerky, but OK. I've just tried running it on the Xbox and it's horrible!
There's nothing to the game (it's just a fractal renderer), so it's got to be the pixel shader causing the issues. I also think it's the PS code, because I've lowered the iterations and it's fine. I've also checked, and the GC delta is zero.
Are there any HLSL functions that are no-nos on the Xbox? I must be doing something wrong here; performance can't be that bad!
#include "FractalBase.fxh"
float ZPower;
float3 Colour;
float3 ColourScale;
float ComAbs(float2 Arg)
{
return sqrt(Arg.x * Arg.x + Arg.y * Arg.y);
}
float2 ComPow(float2 Arg, float Power)
{
float Mod = pow(Arg.x * Arg.x + Arg.y * Arg.y, Power / 2);
float Ang = atan2(Arg.y, Arg.x) * Power;
return float2(Mod * cos(Ang), Mod * sin(Ang));
}
float4 FractalPixelShader(float2 texCoord : TEXCOORD0, uniform float Iterations) : COLOR0
{
float2 c = texCoord.xy;
float2 z = 0;
float i;
float oldBailoutTest = 0;
float bailoutTest = 0;
for(i = 0; i < Iterations; i++)
{
z = ComPow(z, ZPower) + c;
bailoutTest = z.x * z.x + z.y * z.y;
if(bailoutTest >= ZPower * ZPower)
{
break;
}
oldBailoutTest = bailoutTest;
}
float normalisedIterations = i / Iterations;
float factor = (bailoutTest - oldBailoutTest) / (ZPower * ZPower - oldBailoutTest);
float4 Result = normalisedIterations + (1 / factor / Iterations);
Result = (i >= Iterations - 1) ? float4(0.0, 0.0, 0.0, 1.0) : float4(Result.x * Colour.r * ColourScale.x, Result.y * Colour.g * ColourScale.y, Result.z * Colour.b * ColourScale.z, 1);
return Result;
}
technique Technique1
{
pass
{
VertexShader = compile vs_3_0 SpriteVertexShader();
PixelShader = compile ps_3_0 FractalPixelShader(128);
}
}
Below is FractalBase.fxh:
float4x4 MatrixTransform : register(vs, c0);
float2 Pan;
float Zoom;
float Aspect;
void SpriteVertexShader(inout float4 Colour : COLOR0,
inout float2 texCoord : TEXCOORD0,
inout float4 position : SV_Position)
{
position = mul(position, MatrixTransform);
// Convert the position into from screen space into complex coordinates
texCoord = (position) * Zoom * float2(1, Aspect) - float2(Pan.x, -Pan.y);
}
EDIT: I did try removing the conditional by using lots of lerps; however, when I did that I got loads of artifacts (and not the kind that "belong in a museum"!). I changed things around and fixed a few logic errors, but the key was to multiply the GreaterThan result by 1 + epsilon, to account for rounding errors turning 0.9999 into 0 when truncated to an integer. See the fixed code below:
#include "FractalBase.fxh"
float ZPower;
float3 Colour;
float3 ColourScale;
float ComAbs(float2 Arg)
{
return sqrt(Arg.x * Arg.x + Arg.y * Arg.y);
}
float2 ComPow(float2 Arg, float Power)
{
float Mod = pow(Arg.x * Arg.x + Arg.y * Arg.y, Power / 2);
float Ang = atan2(Arg.y, Arg.x) * Power;
return float2(Mod * cos(Ang), Mod * sin(Ang));
}
float GreaterThan(float x, float y)
{
return ((x - y) / (2 * abs(x - y)) + 0.5) * 1.001;
}
float4 FractalPixelShader(float2 texCoord : TEXCOORD0, uniform float Iterations) : COLOR0
{
float2 c = texCoord.xy;
float2 z = 0;
int i;
float oldBailoutTest = 0;
float bailoutTest = 0;
int KeepGoing = 1;
int DoneIterations = Iterations;
int Bailout = 0;
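// the lerps below act as branchless selects: the last argument is always 0 or 1,
// so each lerp either keeps its first argument (0) or takes its second (1)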
for(i = 0; i < Iterations; i++)
{
z = lerp(z, ComPow(z, ZPower) + c, KeepGoing);
bailoutTest = lerp(bailoutTest, z.x * z.x + z.y * z.y, KeepGoing);
Bailout = lerp(Bailout, GreaterThan(bailoutTest, ZPower * ZPower), -abs(Bailout) + 1);
KeepGoing = lerp(KeepGoing, 0.0, Bailout);
DoneIterations = lerp(DoneIterations, min(i, DoneIterations), Bailout);
oldBailoutTest = lerp(oldBailoutTest, bailoutTest, KeepGoing);
}
float normalisedIterations = DoneIterations / Iterations;
float factor = (bailoutTest - oldBailoutTest) / (ZPower * ZPower - oldBailoutTest);
float4 Result = normalisedIterations + (1 / factor / Iterations);
Result = (DoneIterations >= Iterations - 1) ? float4(0.0, 0.0, 0.0, 1.0) : float4(Result.x * Colour.r * ColourScale.x, Result.y * Colour.g * ColourScale.y, Result.z * Colour.b * ColourScale.z, 1);
return Result;
}
technique Technique1
{
pass
{
VertexShader = compile vs_3_0 SpriteVertexShader();
PixelShader = compile ps_3_0 FractalPixelShader(128);
}
}
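As an aside, HLSL's built-in step intrinsic performs the same 0/1 comparison exactly (step(y, x) returns 1.0 when x >= y), so it could probably replace GreaterThan and its epsilon fudge (untested on the Xbox):
float GreaterThanStep(float x, float y)
{
    // step returns exactly 1.0 or 0.0, so no epsilon is needed before the int truncation
    return step(y, x);
}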
The Xbox has a pretty large block size, so branching on the Xbox isn't always so great. Also, the compiler isn't always the most effective at emitting the dynamic branches your code seems to use.
Look into the branch attribute: http://msdn.microsoft.com/en-us/library/bb313972%28v=xnagamestudio.31%29.aspx
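For example, applied to the bailout test in your original shader, the hint looks like this (just a sketch):
[branch] // ask the compiler for a real dynamic branch instead of evaluating both sides
if (bailoutTest >= ZPower * ZPower)
{
    break;
}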
Also, if you remove the early bailout, does the PC's performance become more similar to the Xbox's?
Keep in mind that modern graphics cards are actually quite a bit faster than the Xenon unit by now.
