Rotating around created vertices - XNA

I recently started learning XNA and am trying to create a room using vertices. I worked through Riemer's XNA tutorials and learned a lot, but I can't get my camera to work well: every time I move left or right, some of my textured geometry seems to disappear and reappear. Please help.
Here is my code.
public struct MyOwnVertexFormat
{
public Vector3 position;
private Vector2 texCoord;
public MyOwnVertexFormat(Vector3 position, Vector2 texCoord)
{
this.position = position;
this.texCoord = texCoord;
}
public readonly static VertexDeclaration VertexDeclaration = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0)
);
}
public class Game1 : Microsoft.Xna.Framework.Game
{
GraphicsDeviceManager graphics;
GraphicsDevice device;
Effect effect;
Matrix viewMatrix;
Matrix projectionMatrix;
VertexBuffer vertexBuffer;
Vector3 cameraPos;
Texture2D streetTexture;
private Vector3 Position = Vector3.One;
private float Zoom = 2500;
private float RotationY = 0.0f;
private float RotationX = 0.0f;
private Matrix gameWorldRotation;
public Game1()
{
graphics = new GraphicsDeviceManager(this);
Content.RootDirectory = "Content";
}
protected override void Initialize()
{
graphics.PreferredBackBufferWidth = 1024;
graphics.PreferredBackBufferHeight = 768;
graphics.IsFullScreen = false;
graphics.ApplyChanges();
base.Initialize();
}
protected override void LoadContent()
{
device = GraphicsDevice;
effect = Content.Load<Effect>("OurHLSLfile");
SetUpVertices();
SetUpCamera();
streetTexture = Content.Load<Texture2D>("streettexture");
}
private void UpdateKeyboard()
{
if (Keyboard.GetState().IsKeyDown(Keys.Escape))
Exit();
if (Keyboard.GetState().IsKeyDown(Keys.Up))
RotationX += 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Down))
RotationX -= 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Left))
RotationY += 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Right))
RotationY -= 1.0f;
gameWorldRotation =
Matrix.CreateRotationX(MathHelper.ToRadians(RotationX)) *
Matrix.CreateRotationY(MathHelper.ToRadians(RotationY));
}
private void SetUpVertices()
{
MyOwnVertexFormat[] vertices = new MyOwnVertexFormat[12];
vertices[0] = new MyOwnVertexFormat(new Vector3(-20, 0, 10), new Vector2(-0.25f, 25.0f));
vertices[1] = new MyOwnVertexFormat(new Vector3(-20, 0, -100), new Vector2(-0.25f, 0.0f));
vertices[2] = new MyOwnVertexFormat(new Vector3(2, 0, 10), new Vector2(0.25f, 25.0f));
vertices[3] = new MyOwnVertexFormat(new Vector3(2, 0, -100), new Vector2(0.25f, 0.0f));
vertices[4] = new MyOwnVertexFormat(new Vector3(2, 1, 10), new Vector2(0.375f, 25.0f));
vertices[5] = new MyOwnVertexFormat(new Vector3(2, 1, -100), new Vector2(0.375f, 0.0f));
vertices[6] = new MyOwnVertexFormat(new Vector3(3, 1, 10), new Vector2(0.5f, 25.0f));
vertices[7] = new MyOwnVertexFormat(new Vector3(3, 1, -100), new Vector2(0.5f, 0.0f));
vertices[8] = new MyOwnVertexFormat(new Vector3(-13, 1, 10), new Vector2(0.75f, 25.0f));
vertices[9] = new MyOwnVertexFormat(new Vector3(-13, 1, -100), new Vector2(0.75f, 0.0f));
vertices[10] = new MyOwnVertexFormat(new Vector3(-13, 21, 10), new Vector2(1.25f, 25.0f));
vertices[11] = new MyOwnVertexFormat(new Vector3(-13, 21, -100), new Vector2(1.25f, 0.0f));
vertexBuffer = new VertexBuffer(device, MyOwnVertexFormat.VertexDeclaration, vertices.Length, BufferUsage.WriteOnly);
vertexBuffer.SetData(vertices);
}
private void SetUpCamera()
{
cameraPos = new Vector3(-25, 13, 75);
viewMatrix = Matrix.CreateLookAt(cameraPos, new Vector3(0, 2, -12), new Vector3(0, 1, 0));
projectionMatrix = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver4, device.Viewport.AspectRatio, 1.0f, 5000.0f);
}
protected override void UnloadContent()
{
}
protected override void Update(GameTime gameTime)
{
if (GamePad.GetState(PlayerIndex.One).Buttons.Back == ButtonState.Pressed)
this.Exit();
UpdateKeyboard();
base.Update(gameTime);
}
protected override void Draw(GameTime gameTime)
{
device.Clear(ClearOptions.Target | ClearOptions.DepthBuffer, Color.DarkSlateBlue, 1.0f, 0);
effect.CurrentTechnique = effect.Techniques["Simplest"];
effect.Parameters["xViewProjection"].SetValue(viewMatrix * projectionMatrix * gameWorldRotation);
effect.Parameters["xTexture"].SetValue(streetTexture);
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(vertexBuffer);
device.DrawPrimitives(PrimitiveType.TriangleStrip, 0, 10);
}
base.Draw(gameTime);
}
}

I assume it's a projection issue. This line, taken from your code, strengthens my assumption:
viewMatrix * projectionMatrix * gameWorldRotation
tl;dr, the correct order is:
gameWorldRotation * viewMatrix * projectionMatrix
Please remember that order matters when multiplying matrices. In mathematical terms:
Matrix multiplication is not commutative!
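A quick 2x2 example (the values are arbitrary, chosen only to show the effect):
A := [[0, 1], [1, 0]]
B := [[1, 0], [0, 2]]
A * B = [[0, 2], [1, 0]]
B * A = [[0, 1], [2, 0]]
The two products differ, and the same holds for the 4x4 world, view and projection matrices.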
Those three matrices map a vector to three different coordinate systems, the world, view and projection space. Usually vertices are defined in object space. Multiplying a vector with the world (view, projection) matrix brings your vector to world (view, projection) space:
object space => world space => view space => projection space
XNA uses a row-vector layout (as opposed to column vectors). That means a vector is written horizontally as (x, y, z). Skipping the dry details, it means that when transforming a vector (multiplying the vector with a matrix), the vector is the left operand and the matrix is the right operand:
A := [3x3 Matrix]
(x, y, z) * A = (x', y', z') // The result is another 3D vector
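In XNA code, the built-in transform helpers follow this row-vector convention; as a rough illustration (objectPos and worldMatrix are placeholder names, not from the code above):
// conceptually: worldPos = objectPos * worldMatrix
Vector3 worldPos = Vector3.Transform(objectPos, worldMatrix);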
Now in order to apply all three matrices we simply use the result of the previous transformation as input for the next transform:
W .... world matrix
V .... view matrix
P .... projection matrix
x .... vector
x' ... transformed vector
x' = ((x * W) * V) * P
Finally, matrix multiplication is associative (the parentheses don't matter). And that's why we can combine the world, view and projection matrices into a single matrix before sending it to the device:
x' = ((x * W) * V) * P = x * W * V * P = x * (W * V * P)
World * View * Projection. That's all you need. (And maybe some basic matrix math for your future work.)
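Applied to the Draw method in the question, the shader parameter would be set roughly like this (keeping the xViewProjection parameter name used by the original effect):
// world (the rotation) first, then view, then projection
effect.Parameters["xViewProjection"].SetValue(gameWorldRotation * viewMatrix * projectionMatrix);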

Related

Having problems rending 3d models on aruco markers using tvec/rvec

My graduation project team and I are working on a marker-based AR application in which one of the tasks is to draw some 3D models on markers. We are using OpenCV to detect the markers and Rajawali to draw the 3D models.
The problem is that the tvec/rvec we get from Aruco.estimatePoseSingleMarkers(...) doesn't map correctly to the position and rotation on the markers, although we can draw the axes accurately.
[image of the 3D model on the marker]
So we wanted to ask:
Is there any processing needed on the tvec/rvec before using them to get the position and rotation?
Are there alternatives for marker detection that are more convenient to use than OpenCV with Rajawali?
What could cause them to be inaccurate?
Code:
marker-detection
public void markerDetection(Mat frame)
{
ids = new Mat();
corners = new ArrayList<>();
Aruco.detectMarkers(frame, markerDictionary, corners, ids);
List<MarkerData> newDetectedMarkers = new ArrayList<>();
if(ids.size().height > 0)
{
rvecs = new Mat();
tvecs = new Mat();
Aruco.estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distortionCoef, rvecs, tvecs);
for(int i=0; i<ids.size().height; i++)
{
double[] rvecArray = rvecs.get(i, 0), tvecArray = tvecs.get(i, 0);
Mat rvec = new Mat(3, 1, CvType.CV_64FC1), tvec = new Mat(3, 1, CvType.CV_64FC1);
for (int j = 0; j < 3; ++j) {
rvec.put(j, 0, rvecArray[j]);
tvec.put(j, 0, tvecArray[j]);
}
multiply(rvec, new Scalar(180.0 / Math.PI), rvec); // convert the rotation vector from radians to degrees
MarkerData newMarker = new MarkerData(rvec, tvec, corners.get(i), (int)ids.get(i, 0)[0]);
newDetectedMarkers.add(newMarker);
}
}
updateDetectedMarkers(newDetectedMarkers); // update the detected markers
}
Rendering
@Override
protected void onRender(long elapsedRealtime, double deltaTime) {
super.onRender(elapsedRealtime, deltaTime);
getCurrentScene().clearChildren();
List<MarkerData> markerData = markerDetector.getDetectedMarkers();
for (MarkerData marker : markerData) {
try {
int id = R.raw.monkey;
LoaderOBJ parser = new LoaderOBJ(mContext.getResources(), mTextureManager, id);
parser.parse();
Object3D object = parser.getParsedObject();
object.setMaterial(defaultMaterial);
object.setScale(0.3);
Mat rvec = marker.getRvec(); // 3x1 Mat
Mat tvec = marker.getTvec(); // 3x1 Mat
object.setRotation(new Vector3(rvec.get(0, 0)[0], rvec.get(1, 0)[0], rvec.get(2, 0)[0]));
object.setPosition(new Vector3(tvec.get(0, 0)[0], tvec.get(1, 0)[0], tvec.get(2, 0)[0]));
getCurrentScene().addChild(object);
} catch (ParsingException e) {
e.printStackTrace();
}
}
}

Unity3D & OpenCV using a custom camera (AruCo)

I have been trying to perform ArUco marker detection in Unity using a custom camera that can provide a texture. This is not a regular camera, so you can't use the regular WebCamTexture that comes with the package. I have been trying everything I can imagine, but it just doesn't work for me. I have tested so many things that I'll post my ArUco code; maybe someone can help me fix this. The code doesn't show any errors, but it also doesn't detect anything; at a minimum I was expecting the rejected corners to be detected.
using UnityEngine;
using UnityEngine.UI;
using UnityEngine.SceneManagement;
using System.Collections;
using System.Collections.Generic;
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.Calib3dModule;
using OpenCVForUnity.ArucoModule;
using OpenCVForUnity.UnityUtils;
using Leap.Unity;
using Utils = OpenCVForUnity.UnityUtils.Utils;
using OpenCVForUnity.ImgprocModule;
namespace OpenCVForUnityExample
{
/// <summary>
/// ArUco Example
/// An example of marker-based AR view and camera pose estimation using the aruco (ArUco Marker Detection) module.
/// Referring to https://github.com/opencv/opencv_contrib/blob/master/modules/aruco/samples/detect_markers.cpp.
/// http://docs.opencv.org/3.1.0/d5/dae/tutorial_aruco_detection.html
/// </summary>
public class ArUcoExample_LM : MonoBehaviour
{
/// <summary>
/// The image texture.
/// </summary>
public Texture2D imgTexture;
[Space (10)]
/// <summary>
/// The dictionary identifier.
/// </summary>
public ArUcoDictionary dictionaryId = ArUcoDictionary.DICT_6X6_250;
/// <summary>
/// The dictionary id dropdown.
/// </summary>
public Dropdown dictionaryIdDropdown;
/// <summary>
/// Determines whether rejected corners are shown.
/// </summary>
public bool showRejectedCorners = false;
/// <summary>
/// The show-rejected-corners toggle.
/// </summary>
public Toggle showRejectedCornersToggle;
/// <summary>
/// Determines whether pose estimation is applied.
/// </summary>
public bool applyEstimationPose = true;
/// <summary>
/// The length of the markers' side. Normally, unit is meters.
/// </summary>
public float markerLength = 0.1f;
/// <summary>
/// The AR game object.
/// </summary>
public GameObject arGameObject;
/// <summary>
/// The AR camera.
/// </summary>
public Camera arCamera;
[Space (10)]
/// <summary>
/// Determines whether to move the AR camera.
/// </summary>
public bool shouldMoveARCamera = false;
/// <summary>
/// The rgb mat.
/// </summary>
Mat rgbMat;
Mat ids;
List<Mat> corners;
List<Mat> rejectedCorners;
Mat rvecs;
Mat tvecs;
Mat rotMat;
Mat gray;
Mat bw;
DetectorParameters detectorParams;
Dictionary dictionary;
/// <summary>
/// The texture.
/// </summary>
public Texture2D texture;
public LM_Texture2DToMat_ZED LMD;
Mat hierarchy;
List<MatOfPoint> contours;
// Use this for initialization
void Start ()
{
ids = new Mat();
corners = new List<Mat>();
rejectedCorners = new List<Mat>();
rvecs = new Mat();
tvecs = new Mat();
rotMat = new Mat(3, 3, CvType.CV_64FC1);
detectorParams = DetectorParameters.create();
dictionary = Aruco.getPredefinedDictionary((int)dictionaryId);
///////////////////
///
hierarchy = new Mat();
contours = new List<MatOfPoint>();
/////////////////////////////////////
}
// Update is called once per frame
void Update()
{
imgTexture = LMD.myCroppedTex2d;
if (imgTexture != null)
{
//gameObject.GetComponent<Renderer>().material.mainTexture = imgTexture; //THIS WAS JUST TO VALIDATE THE TEXTURE WAS COMING THROUGH PROPERLY.
if (rgbMat == null)
{
//rgbMat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3);
rgbMat = LMD.outputMAT;
rgbMat.convertTo(rgbMat, CvType.CV_8UC3);
Debug.LogWarning("MAT INFO: "+rgbMat+" COLS: "+rgbMat.cols());
texture = new Texture2D(rgbMat.cols(), rgbMat.rows(), TextureFormat.RGBA32, false);
}
dictionaryIdDropdown.value = (int)dictionaryId;
showRejectedCornersToggle.isOn = showRejectedCorners;
DetectMarkers();
}
}
private void DetectMarkers ()
{
Utils.texture2DToMat (imgTexture, rgbMat); // <-- does not work; the image breaks from here.
//Debug.Log ("imgMat dst ToString " + rgbMat.ToString ());
/////
//rgbMat = LMD.outputMAT;
//rgbMat.convertTo(rgbMat, CvType.CV_32SC2);
//rgbMat.convertTo(rgbMat, CvType.CV_8UC1);
//Debug.Log("imgMat dst ToString " + rgbMat.ToString());
//Utils.matToTexture2D(rgbMat, texture); // <-- if you disable line 121,297 and 295 and comment out this 3 lines, you get a blinking image.
//gameObject.GetComponent<Renderer>().material.mainTexture = texture;
/////
//gameObject.transform.localScale = new Vector3 (imgTexture.width, imgTexture.height, 1);
//Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);
float width = rgbMat.width ();
float height = rgbMat.height ();
float imageSizeScale = 1.0f;
float widthScale = (float)Screen.width / width;
float heightScale = (float)Screen.height / height;
if (widthScale < heightScale) {
Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
imageSizeScale = (float)Screen.height / (float)Screen.width;
} else {
Camera.main.orthographicSize = height / 2;
}
// set camera parameters.
int max_d = (int)Mathf.Max (width, height);
double fx = max_d;
double fy = max_d;
double cx = width / 2.0f;
double cy = height / 2.0f;
Mat camMatrix = new Mat (3, 3, CvType.CV_64FC1);
camMatrix.put (0, 0, fx);
camMatrix.put (0, 1, 0);
camMatrix.put (0, 2, cx);
camMatrix.put (1, 0, 0);
camMatrix.put (1, 1, fy);
camMatrix.put (1, 2, cy);
camMatrix.put (2, 0, 0);
camMatrix.put (2, 1, 0);
camMatrix.put (2, 2, 1.0f);
//Debug.Log ("camMatrix " + camMatrix.dump ()); //Dont care about this right now
MatOfDouble distCoeffs = new MatOfDouble (0, 0, 0, 0);
//Debug.Log ("distCoeffs " + distCoeffs.dump ()); //Dont care about this right now
// calibration camera matrix values.
Size imageSize = new Size (width * imageSizeScale, height * imageSizeScale);
double apertureWidth = 0;
double apertureHeight = 0;
double[] fovx = new double[1];
double[] fovy = new double[1];
double[] focalLength = new double[1];
Point principalPoint = new Point (0, 0);
double[] aspectratio = new double[1];
Calib3d.calibrationMatrixValues (camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio);
//Debug.Log ("imageSize " + imageSize.ToString ());
//Debug.Log ("apertureWidth " + apertureWidth);
//Debug.Log ("apertureHeight " + apertureHeight);
//Debug.Log ("fovx " + fovx [0]);
//Debug.Log ("fovy " + fovy [0]);
//Debug.Log ("focalLength " + focalLength [0]);
//Debug.Log ("principalPoint " + principalPoint.ToString ());
//Debug.Log ("aspectratio " + aspectratio [0]);
// To convert the difference of the FOV value of the OpenCV and Unity.
double fovXScale = (2.0 * Mathf.Atan ((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2 ((float)cx, (float)fx) + Mathf.Atan2 ((float)(imageSize.width - cx), (float)fx));
double fovYScale = (2.0 * Mathf.Atan ((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2 ((float)cy, (float)fy) + Mathf.Atan2 ((float)(imageSize.height - cy), (float)fy));
//Debug.Log ("fovXScale " + fovXScale); //Dont care about this right now
//Debug.Log ("fovYScale " + fovYScale); //Dont care about this right now
// Adjust Unity Camera FOV https://github.com/opencv/opencv/commit/8ed1945ccd52501f5ab22bdec6aa1f91f1e2cfd4
if (widthScale < heightScale) {
arCamera.fieldOfView = (float)(fovx [0] * fovXScale);
} else {
arCamera.fieldOfView = (float)(fovy [0] * fovYScale);
}
// Display objects near the camera.
arCamera.nearClipPlane = 0.01f;
//Debug.Log("RGBMAT " + rgbMat);
//Debug.Log("Dictionary " + dictionary);
//Debug.Log("corners " + corners);
//Debug.Log("ids " + ids);
//Debug.Log("detectorParams " + detectorParams);
//Debug.Log("rejectedCorners " + rejectedCorners);
//Debug.Log("camMatrix " + camMatrix);
//Debug.Log("distCoeffs " + distCoeffs);
/////////////////////////////
if (gray == null)
{
gray = new Mat();
}
Imgproc.cvtColor(rgbMat, gray, Imgproc.COLOR_BGR2GRAY);
// Convert image to binary
if (bw == null)
{
bw = new Mat();
}
Imgproc.threshold(gray, bw, 50, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
/////////////////////////////
///
// detect markers. ////////////////////////////////////// if you enable it breaks
Aruco.detectMarkers (gray, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);
// if at least one marker detected
if (ids.total () > 0) {
Debug.Log("some ids");
Aruco.drawDetectedMarkers (rgbMat, corners, ids, new Scalar (0, 255, 0));
// estimate pose.
if (applyEstimationPose) {
Debug.Log("this is progress");
Aruco.estimatePoseSingleMarkers (corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);
for (int i = 0; i < ids.total (); i++) {
using (Mat rvec = new Mat (rvecs, new OpenCVForUnity.CoreModule.Rect (0, i, 1, 1)))
using (Mat tvec = new Mat (tvecs, new OpenCVForUnity.CoreModule.Rect (0, i, 1, 1))) {
// In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue)
Calib3d.drawFrameAxes(rgbMat, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);
}
// This example can display the ARObject only on the first detected marker.
if (i == 0) {
// Get translation vector
double[] tvecArr = tvecs.get (i, 0);
// Get rotation vector
double[] rvecArr = rvecs.get (i, 0);
Mat rvec = new Mat (3, 1, CvType.CV_64FC1);
rvec.put (0, 0, rvecArr);
// Convert rotation vector to rotation matrix.
Calib3d.Rodrigues (rvec, rotMat);
double[] rotMatArr = new double[rotMat.total ()];
rotMat.get (0, 0, rotMatArr);
// Convert OpenCV camera extrinsic parameters to Unity Matrix4x4.
Matrix4x4 transformationM = new Matrix4x4 (); // from OpenCV
transformationM.SetRow (0, new Vector4 ((float)rotMatArr [0], (float)rotMatArr [1], (float)rotMatArr [2], (float)tvecArr [0]));
transformationM.SetRow (1, new Vector4 ((float)rotMatArr [3], (float)rotMatArr [4], (float)rotMatArr [5], (float)tvecArr [1]));
transformationM.SetRow (2, new Vector4 ((float)rotMatArr [6], (float)rotMatArr [7], (float)rotMatArr [8], (float)tvecArr [2]));
transformationM.SetRow (3, new Vector4 (0, 0, 0, 1));
Debug.Log ("transformationM " + transformationM.ToString ());
Matrix4x4 invertYM = Matrix4x4.TRS (Vector3.zero, Quaternion.identity, new Vector3 (1, -1, 1));
Debug.Log ("invertYM " + invertYM.ToString ());
// right-handed coordinates system (OpenCV) to left-handed one (Unity)
// https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
Matrix4x4 ARM = invertYM * transformationM * invertYM;
if (shouldMoveARCamera) {
ARM = arGameObject.transform.localToWorldMatrix * ARM.inverse;
Debug.Log ("ARM " + ARM.ToString ());
ARUtils.SetTransformFromMatrix (arCamera.transform, ref ARM);
} else {
ARM = arCamera.transform.localToWorldMatrix * ARM;
Debug.Log ("ARM " + ARM.ToString ());
ARUtils.SetTransformFromMatrix (arGameObject.transform, ref ARM);
}
}
}
}
}
if (showRejectedCorners && rejectedCorners.Count > 0) {
Debug.Log("Show Rejected Corners");
Aruco.drawDetectedMarkers (rgbMat, rejectedCorners, new Mat (), new Scalar (255, 0, 0));
}
Utils.matToTexture2D (rgbMat, texture);
gameObject.GetComponent<Renderer>().material.mainTexture = texture;
}
private void ResetObjectTransform ()
{
// reset AR object transform.
Matrix4x4 i = Matrix4x4.identity;
ARUtils.SetTransformFromMatrix (arCamera.transform, ref i);
ARUtils.SetTransformFromMatrix (arGameObject.transform, ref i);
}
/// <summary>
/// Raises the destroy event.
/// </summary>
void OnDestroy ()
{
if (rgbMat != null)
rgbMat.Dispose ();
}
/// <summary>
/// Raises the back button click event.
/// </summary>
public void OnBackButtonClick ()
{
SceneManager.LoadScene ("OpenCVForUnityExample");
}
/// <summary>
/// Raises the dictionary id dropdown value changed event.
/// </summary>
public void OnDictionaryIdDropdownValueChanged (int result)
{
if ((int)dictionaryId != result) {
dictionaryId = (ArUcoDictionary)result;
ResetObjectTransform ();
DetectMarkers ();
}
}
/// <summary>
/// Raises the show rejected corners toggle value changed event.
/// </summary>
public void OnShowRejectedCornersToggleValueChanged ()
{
if (showRejectedCorners != showRejectedCornersToggle.isOn) {
showRejectedCorners = showRejectedCornersToggle.isOn;
ResetObjectTransform ();
DetectMarkers ();
}
}
public enum ArUcoDictionary
{
DICT_4X4_50 = Aruco.DICT_4X4_50,
DICT_4X4_100 = Aruco.DICT_4X4_100,
DICT_4X4_250 = Aruco.DICT_4X4_250,
DICT_4X4_1000 = Aruco.DICT_4X4_1000,
DICT_5X5_50 = Aruco.DICT_5X5_50,
DICT_5X5_100 = Aruco.DICT_5X5_100,
DICT_5X5_250 = Aruco.DICT_5X5_250,
DICT_5X5_1000 = Aruco.DICT_5X5_1000,
DICT_6X6_50 = Aruco.DICT_6X6_50,
DICT_6X6_100 = Aruco.DICT_6X6_100,
DICT_6X6_250 = Aruco.DICT_6X6_250,
DICT_6X6_1000 = Aruco.DICT_6X6_1000,
DICT_7X7_50 = Aruco.DICT_7X7_50,
DICT_7X7_100 = Aruco.DICT_7X7_100,
DICT_7X7_250 = Aruco.DICT_7X7_250,
DICT_7X7_1000 = Aruco.DICT_7X7_1000,
DICT_ARUCO_ORIGINAL = Aruco.DICT_ARUCO_ORIGINAL,
}
}
}
The problem with the code was that the images were coming in as an Alpha8 texture, while OpenCV works with RGB textures, so when I converted to RGB the alpha channel was stripped from the image, and that was pretty much the only content in it.
The solution was posted in this other thread:
Solution to change the format from Alpha8 to an RGBA texture.
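For reference, a minimal sketch of that kind of conversion, assuming the source texture is marked readable and using only the standard Texture2D/Color32 API (the helper name is made up for illustration):
// Hypothetical helper: copy a readable Alpha8 texture into an RGBA32 texture,
// moving the alpha values into the RGB channels so OpenCV has visible content to work with.
Texture2D ConvertAlpha8ToRGBA32(Texture2D alphaTex)
{
    Texture2D rgbaTex = new Texture2D(alphaTex.width, alphaTex.height, TextureFormat.RGBA32, false);
    Color32[] pixels = alphaTex.GetPixels32(); // requires Read/Write Enabled on the texture
    for (int i = 0; i < pixels.Length; i++)
    {
        byte a = pixels[i].a;
        pixels[i] = new Color32(a, a, a, 255); // spread the alpha content into RGB
    }
    rgbaTex.SetPixels32(pixels);
    rgbaTex.Apply();
    return rgbaTex;
}
The resulting RGBA32 texture can then be passed to Utils.texture2DToMat as usual.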

Scrolling Image using PixelWriter / Reader

I'm trying to create a scrolling image that wraps around a canvas to follow its own tail. I've been trying to use PixelWriters and PixelReaders to save off the vertical pixel lines that scroll off the screen to the west, and append them to a new image, which should grow on the RHS (east) of the screen.
It scrolls, but that's all that's happening. I don't understand how to calculate the scanlines, so apologies for this part.
Any help appreciated.
package controller;
import javafx.animation.AnimationTimer;
import javafx.scene.canvas.Canvas;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.*;
import javafx.scene.layout.*;
import util.GraphicsUtils;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.nio.file.Paths;
class ImageContainer extends HBox {
int w, h;
int translatedAmount = 0;
Image image;
Canvas canvas;
long startNanoTime = System.nanoTime();
WritableImage eastImage = null;
public ImageContainer() {
setVisible(true);
load();
w = (int) image.getWidth();
h = (int) image.getHeight();
canvas = new Canvas(w, h);
int edgeX = (int) canvas.getWidth(); //You can set this a little west for visibility sake...whilst debugging
getChildren().addAll(canvas);
GraphicsContext gc = canvas.getGraphicsContext2D();
canvas.setVisible(true);
gc.drawImage(image, 0, 0, w, h);
setPrefSize(w, h);
eastImage = new WritableImage(translatedAmount+1, h); //create a new eastImage
new AnimationTimer() {
public void handle(long currentNanoTime) {
if (((System.nanoTime() - startNanoTime) / 1000000000.0) < 0.05) {
return;
} else {
startNanoTime = System.nanoTime();
}
translatedAmount++;
Image westLine = getSubImageRectangle(image, 1, 0, 1, h); //get a 1 pixel strip from west of main image
PixelReader westLinepixelReader = westLine.getPixelReader(); //create a pixel reader for this image
byte[] westLinePixelBuffer = new byte[1 * h * 4]; //create a buffer to store the pixels collected from the about to vanish westLine
westLinepixelReader.getPixels(0, 0, 1, h, PixelFormat.getByteBgraInstance(), westLinePixelBuffer, 0, 4); //collect the pixels from westLine strip
Image tempImg = eastImage; //save away the current east side image
byte[] tempBuffer = new byte[(int)tempImg.getWidth() * h * 4];
PixelReader tempImagePixelReader = tempImg.getPixelReader(); //create a pixel reader for our temp copy of the east side image
tempImagePixelReader.getPixels(0, 0, (int)tempImg.getWidth(), h, PixelFormat.getByteBgraInstance(), tempBuffer, 0, 4); //save the tempImage into the tempBuffer
eastImage = new WritableImage(translatedAmount+1, h); //create a new eastImage, but one size larger
PixelWriter eastImagePixelWriter = eastImage.getPixelWriter(); //create a pixel writer for this new east side image
eastImagePixelWriter.setPixels(1, 0, (int)tempImg.getWidth(), h, PixelFormat.getByteBgraInstance(), tempBuffer, 0, 4); //copy the temp image in at x=1
eastImagePixelWriter.setPixels((int)tempImg.getWidth(), 0, 1, h, PixelFormat.getByteBgraInstance(), westLinePixelBuffer, 0, 4); //copy the westLine at x=tempImg.width
image = getSubImageRectangle(image, 1, 0, (int) image.getWidth() - 1, h);
gc.drawImage(image, 0, 0); //draw main image
System.out.println(edgeX-eastImage.getWidth());
gc.drawImage(eastImage, edgeX-eastImage.getWidth(), 0); //add lost image lines
}
}.start();
}
public void load() {
Path imagePath = Paths.get("./src/main/resources/ribbonImages/clouds.png");
File f = imagePath.toFile();
assert f.exists();
image = new Image(f.toURI().toString());
}
public Image getSubImageRectangle(Image image, int x, int y, int w, int h) {
PixelReader pixelReader = image.getPixelReader();
WritableImage newImage = new WritableImage(pixelReader, x, y, w, h);
ImageView imageView = new ImageView();
imageView.setImage(newImage);
return newImage;
}
}
Why make this more difficult than necessary? Simply draw the image to the Canvas twice:
public static void drawImage(Canvas canvas, Image sourceImage, double offset, double wrapWidth) {
GraphicsContext gc = canvas.getGraphicsContext2D();
gc.clearRect(0, 0, canvas.getWidth(), canvas.getHeight());
// make |offset| < wrapWidth
offset %= wrapWidth;
if (offset < 0) {
// a negative offset (Java's % can return negative values) would leave
// a gap on the left, so wrap it back into the range [0, wrapWidth)
offset += wrapWidth;
}
gc.drawImage(sourceImage, -offset, 0);
gc.drawImage(sourceImage, wrapWidth - offset, 0);
}
@Override
public void start(Stage primaryStage) {
Image image = new Image("https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg/402px-Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg");
Canvas canvas = new Canvas(image.getWidth(), image.getHeight());
primaryStage.setResizable(false);
Scene scene = new Scene(new Group(canvas));
DoubleProperty offset = new SimpleDoubleProperty();
offset.addListener((observable, oldOffset, newOffset) -> drawImage(canvas, image, newOffset.doubleValue(), canvas.getWidth()));
Timeline timeline = new Timeline(
new KeyFrame(Duration.ZERO, new KeyValue(offset, 0, Interpolator.LINEAR)),
new KeyFrame(Duration.seconds(10), new KeyValue(offset, image.getWidth()*2, Interpolator.LINEAR))
);
timeline.setCycleCount(Animation.INDEFINITE);
timeline.play();
primaryStage.setScene(scene);
primaryStage.sizeToScene();
primaryStage.show();
}

Advanced image denoising using OpenCV

I am trying to denoise this image to get better edges.
I've tried bilateralFilter, GaussianBlur, morphological close and several thresholds, but every time I get an image like:
and when I run HoughLinesP with dilation of the edges, the result is really bad.
Can someone help me improve this? Is there some way to remove that noise?
First try: using GaussianBlur. In this case, I must use equalizeHist or I can't get edges, even with a really low threshold.
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgGrayscale = new Mat();
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.cvtColor(image, imgGrayscale, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(imgGrayscale, imgGrayscale);
Imgproc.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 1.8);
Photo.fastNlMeansDenoising(imgBlurred, imgBlurred);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 80);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
Second try: using a bilateral filter:
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.bilateralFilter(image, imgBlurred, 10, 35, 35);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 120);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
As suggested, I am trying to use opencv_contrib's StructuredEdgeDetection. I am testing with a fixed image.
First, I compiled OpenCV with the contrib modules.
Second, I wrote the C++ code:
JNIEXPORT jobject JNICALL Java_vi_pdfscanner_main_ScannerEngine_getRandomFlorest(JNIEnv *env, jobject thiz, jobject bitmap) {
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1);
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // when I run this convertTo, I get an all-black image, and that way I get no edges.
const String model = "/storage/emulated/0/Resp/model.yml.gz";
Ptr<cv::ximgproc::StructuredEdgeDetection> pDollar = cv::ximgproc::createStructuredEdgeDetection(model);
Mat edges;
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "chamando edges");
pDollar->detectEdges(fsrc, edges);
imwrite( "/storage/emulated/0/Resp/edges.jpg", edges);
jclass java_bitmap_class = (jclass)env->FindClass("android/graphics/Bitmap");
jmethodID mid = env->GetMethodID(java_bitmap_class, "getConfig", "()Landroid/graphics/Bitmap$Config;");
jobject bitmap_config = env->CallObjectMethod(bitmap, mid);
jobject _bitmap = mat_to_bitmap(env,edges,false,bitmap_config);
return _bitmap;
}
and I wrote this Java wrapper:
public class ScannerEngine {
private static ScannerEngine ourInstance = new ScannerEngine();
public static ScannerEngine getInstance() {
return ourInstance;
}
private ScannerEngine() {
}
public native Bitmap getRandomFlorest(Bitmap bitmap);
static {
System.loadLibrary("opencv_java3");
System.loadLibrary("Scanner");
}
}
The point is, when I run these lines:
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1); //image is ok
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // now the image is all black, does someone have any idea why?
Thanks very much!
The results, by the way, are strong, like this:
Original Image:
http://prntscr.com/cyd8qi
Edges Image:
http://prntscr.com/cyd9ax
It runs on Android 4.4 (API level 19) on a really old device.
That's all, thank you very much.

How can I implement Shadow Volumes in WebGL

I have some questions about drawing shadows of a .obj model in a WebGL scene.
For example, if I want to draw shadows with the shadow volumes method, how should I go about it? I've tried to implement it but have failed. Are there more efficient methods to do this (with less code)?
Below is the code:
function createShadowBuilder(item) {
var that = function() {};
that.init = function(item) {
this.item = item;
this.glPositionBuffer = null;
this.glVertexIndexBuffer = null;
};
that.setupData = function() {
if (this.glPositionBuffer !== null) {
gl.deleteBuffer(this.glPositionBuffer);
}
if (this.glVertexIndexBuffer !== null) {
gl.deleteBuffer(this.glVertexIndexBuffer);
}
this.glVertices = [];
this.glIndices = [];
};
that.addGLVertex = function(vector) {
this.glVertices.push(vector[0]);
this.glVertices.push(vector[1]);
this.glVertices.push(vector[2]);
this.glIndices.push(this.glIndices.length);
};
that.addShadowSide = function(vector1, vector2, vector3, vector4) {
this.addGLVertex(vector1);
this.addGLVertex(vector2);
this.addGLVertex(vector3);
this.addGLVertex(vector4);
this.addGLVertex(vector3);
this.addGLVertex(vector2);
};
/**
* Check which triangles face the light source...
**/
that.checkDirection = function(lightLocation) {
var triangles = this.item.triangles,
triangle,
vector,
i = triangles.length;
while (i) {
i--;
// Create a normalized vector based on the vector from
// the center of the triangle to the lights position...
triangle = triangles[i];
vector = vec3.create(triangle.center);
vector = vec3.normalize(vec3.subtract(vector, lightLocation));
// Compare the vector with the normal of the triangle...
triangle.visible = (vec3.dot(vector, triangle.normal) < 0);
}
}
/**
* Find the edge of the object...
**/
that.findEdge = function() {
var triangles = this.item.triangles,
triangle,
a, b,
lines = this.item.lines,
line,
lineSidesHash = {},
i, j, k;
this.lineSides = [];
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) {
j = 3;
while (j) {
j--;
// Check if the side...
k = triangle.lines[j];
line = lines[k];
a = line.v1 + '_' + line.v2;
b = line.v2 + '_' + line.v1;
if (lineSidesHash[a] !== undefined) { // Check the v1 -> v2 direction...
// The side already exists, remove it...
delete(lineSidesHash[a]);
}
else if (lineSidesHash[b] !== undefined) { // Check the v2 -> v1 direction...
// The side already exists, remove it...
delete(lineSidesHash[b]);
}
else {
// It's a new side, add it to the list...
lineSidesHash[a] = k;
}
}
}
}
// Convert the hash map to an array...
for (i in lineSidesHash) {
line = lines[lineSidesHash[i]];
this.lineSides.push(line);
}
};
that.rotateVectorX = function(vector, angle) {
var y, z,
sin, cos;
if (angle === 0) {
return;
}
y = vector[1];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[1] = y * cos - z * sin;
vector[2] = y * sin + z * cos;
};
that.rotateVectorY = function(vector, angle) {
var x, z,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
z = vector[2];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = z * sin + x * cos;
vector[2] = z * cos - x * sin;
};
that.rotateVectorZ = function(vector, angle) {
var x, y,
sin, cos;
if (angle === 0) {
return;
}
x = vector[0];
y = vector[1];
sin = Math.sin(angle);
cos = Math.cos(angle);
vector[0] = x * cos - y * sin;
vector[1] = x * sin + y * cos;
};
/**
* Update the shadow...
**/
that.update = function(lightLocation, lightAngle, matrix, zoom) {
// Get the position of the light from the matrix, remove the zoom value...
var vector = vec3.subtract(vec3.create(lightLocation), [matrix[12], matrix[13], matrix[14] + zoom]),
sin, cos,
x, y, z;
// Instead of rotating the object to face the light at the
// right angle it's a lot faster to rotate the light in the
// reverse direction...
this.rotateVectorX(vector, -lightAngle[0]);
this.rotateVectorY(vector, -lightAngle[1]);
this.rotateVectorZ(vector, -lightAngle[2]);
// Store the location for later use...
this.lightLocation = vector;
this.setupData(); // Reset all lists and buffers...
this.checkDirection(vector); // Check which triangles face the light source...
this.findEdge(); // Find the edge...
};
/**
* Create the buffers for the shadow volume...
**/
that.createVolume = function(lightLocation) {
var vertices = this.item.vertices,
triangles = this.item.triangles,
triangle,
lineSides = this.lineSides,
line,
vector1, vector2, vector3, vector4,
i = lineSides.length,
j;
while (i) { // For all edge lines...
i--;
line = lineSides[i];
vector1 = vertices[line.v1];
vector2 = vertices[line.v2];
// Extrude the line away from the light...
// Get the vector from the light position to the vertex...
vector3 = vec3.subtract(vector1, lightLocation, vec3.create());
// Add the normalized vector scaled with the volume
// depth to the vertex which gives a point on the other
// side of the object than the light source...
vector3 = vec3.add(vec3.scale(vec3.normalize(vector3), 30), vector1);
// And again for the second point on the line...
vector4 = vec3.subtract(vector2, lightLocation, vec3.create());
vector4 = vec3.add(vec3.scale(vec3.normalize(vector4), 30), vector2);
this.addShadowSide(vector1, vector2, vector3, vector4);
}
// Add the end caps to the volume...
i = triangles.length;
while (i) {
i--;
triangle = triangles[i];
if (triangle.visible) { // Only add polygons facing the light...
// Add the top...
j = 3;
while (j) {
j--;
this.addGLVertex(vertices[triangle.vertices[j]]);
}
// Add the bottom...
j = 0;
while (j < 3) {
vector1 = vertices[triangle.vertices[j]];
vector2 = vec3.subtract(vector1, lightLocation, vec3.create());
this.addGLVertex(vec3.add(vec3.scale(vec3.normalize(vector2), 30), vector1));
j++;
}
}
}
// Create the vertex position buffer...
this.glPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(this.glVertices), gl.STATIC_DRAW);
this.glPositionBuffer.itemSize = 3;
// Create the vertex index buffer...
this.glVertexIndexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(this.glIndices), gl.STATIC_DRAW);
this.glVertexIndexBuffer.numItems = this.glIndices.length;
};
that.render = function() {
// Create the volume for the light...
this.createVolume(this.lightLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, this.glPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, this.glPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.glVertexIndexBuffer);
setMatrixUniforms();
// Disable the texture coord attribute...
gl.disableVertexAttribArray(shaderProgram.textureCoordAttribute);
// Disable the normal attribute...
gl.disableVertexAttribArray(shaderProgram.vertexNormalAttribute);
// Disable the color attribute...
gl.disableVertexAttribArray(shaderProgram.vertexColorAttribute);
// Render both front and back facing polygons with different stencil operations...
gl.disable(gl.CULL_FACE);
gl.enable(gl.STENCIL_TEST);
gl.depthFunc(gl.LESS);
// Disable rendering to the color buffer...
gl.colorMask(false, false, false, false);
// Disable z buffer updating...
gl.depthMask(false);
// Allow all bits in the stencil buffer...
gl.stencilMask(255);
// Increase the stencil buffer for back facing polygons, set the z-pass operator
gl.stencilOpSeparate(gl.BACK, gl.KEEP, gl.KEEP, gl.INCR);
// Decrease the stencil buffer for front facing polygons, set the z-pass operator
gl.stencilOpSeparate(gl.FRONT, gl.KEEP, gl.KEEP, gl.DECR);
// Always pass...
gl.stencilFunc(gl.ALWAYS, 0, 255);
gl.drawElements(gl.TRIANGLES, this.glVertexIndexBuffer.numItems, gl.UNSIGNED_SHORT, 0);
// Enable rendering to the color and depth buffer again...
gl.colorMask(true, true, true, true);
gl.depthMask(true);
gl.disable(gl.STENCIL_TEST);
};
that.init(item);
return that;
}
This code is taken from an example on the web; I'm trying to adapt it.
