Initially, I tried implementing the OpenCV Basic Drawing example in Rust using the Rust OpenCV bindings (crate opencv 0.48.0).
However, I got stuck.
I want to draw a closed polygon with opencv::imgproc::polylines.
The vertices of the polygon are given by an array of two-dimensional Cartesian coordinates.
I need to pass these points to the 2nd argument of the function which is of type &dyn opencv::core::ToInputArray.
This is where I struggle. How do I convert the array of vertices to an argument of type opencv::core::ToInputArray?
let pts = [[100, 50], [50, 150], [150, 150]];
imgproc::polylines(
    &mut image,
    ???, // <-- "pts" has to go here
    true,
    core::Scalar::from([0.0, 0.0, 255.0, 255.0]),
    1, 8, 0).unwrap();
Minimal example
use opencv::{core, imgproc, highgui};

fn main() {
    let mut image: core::Mat = core::Mat::new_rows_cols_with_default(
        200, 200, core::CV_8UC4, core::Scalar::from([0.0, 0.0, 0.0, 0.0])).unwrap();
    // draw yellow quad
    imgproc::rectangle(
        &mut image, core::Rect { x: 50, y: 50, width: 100, height: 100 },
        core::Scalar::from([0.0, 255.0, 255.0, 255.0]), -1, 8, 0).unwrap();
    // should draw red triangle -> causes error (of course)
    /*
    let pts = [[100, 50], [50, 150], [150, 150]];
    imgproc::polylines(
        &mut image,
        &pts,
        true,
        core::Scalar::from([0.0, 0.0, 255.0, 255.0]),
        1, 8, 0).unwrap();
    */
    highgui::imshow("", &image).unwrap();
    highgui::wait_key(0).unwrap();
}
[dependencies]
opencv = {version = "0.48.0", features = ["buildtime-bindgen"]}
I found the solution with the help of the comment from @kmdreko.
I can define the vertices with an opencv::types::VectorOfPoint, which implements the opencv::core::ToInputArray trait:
let pts = types::VectorOfPoint::from(vec![
    core::Point { x: 100, y: 50 },
    core::Point { x: 50, y: 150 },
    core::Point { x: 150, y: 150 },
]);
Complete example:
use opencv::{core, types, imgproc, highgui};

fn main() {
    let mut image: core::Mat = core::Mat::new_rows_cols_with_default(
        200, 200, core::CV_8UC4, core::Scalar::from([0.0, 0.0, 0.0, 0.0])).unwrap();
    let pts = types::VectorOfPoint::from(vec![
        core::Point { x: 100, y: 50 },
        core::Point { x: 50, y: 150 },
        core::Point { x: 150, y: 150 },
    ]);
    imgproc::polylines(
        &mut image,
        &pts,
        true,
        core::Scalar::from([0.0, 0.0, 255.0, 255.0]),
        1, 8, 0).unwrap();
    highgui::imshow("", &image).unwrap();
    highgui::wait_key(0).unwrap();
}
I'm writing a game in Swift 5 (I had the same problem with Swift 4, which I recently updated my game from), with all my SCNNodes centered around SCNVector3Zero. In other words, (0,0,0) is at the center of my play area.
The SCNCamera is attached to an SCNNode and positioned just outside the limits of the play area, looking at (0,0,0).
cameraNode.look(at: SCNVector3Zero)
If I place 5 nodes in the scene, all on the y=0 plane, at (0,0,0), (-1,0,-1), (1,0,1), (-1,0,1) and (1,0,-1), like the "five" face of a die, it all works great. I can rotate around the scene with the node at (0,0,0) staying centered, with no movement.
If I add a 2nd row, so the first row moves to y=1 and the second row gets nodes with y=-1, the scene wobbles a little when rotating.
The further a node is moved from the center, the more exaggerated this wobble becomes.
Here's the code setting up the scene (this example has three rows and looks like a three-dimensional "five" face of a die: one dot in the center at (0,0,0) and the other dots at the corners of a cube) ...
addSphere(x: 0.0, y: 0.0, z: 0.0, radius: radius, textureName: "earth")
addSphere(x: -2.0 * spacing, y: -2.0 * spacing, z: 0.0, radius: radius, textureName: "earth")
addSphere(x: 2.0 * spacing, y: 2.0 * spacing, z: 0.0, radius: radius, textureName: "earth")
addSphere(x: -2.0 * spacing, y: 2.0 * spacing, z: 0.0, radius: radius, textureName: "granite")
addSphere(x: 2.0 * spacing, y: -2.0 * spacing, z: 0.0, radius: radius, textureName: "granite")
addSphere(x: 0.0, y: -2.0 * spacing, z: -2.0 * spacing, radius: radius, textureName: "slime")
addSphere(x: 0.0, y: 2.0 * spacing, z: 2.0 * spacing, radius: radius, textureName: "slime")
addSphere(x: 0.0, y: 2.0 * spacing, z: -2.0 * spacing, radius: radius, textureName: "wood")
addSphere(x: 0.0, y: -2.0 * spacing, z: 2.0 * spacing, radius: radius, textureName: "wood")
This code behaves itself: everything is symmetrical around (0,0,0), and the node at (0,0,0) stays exactly where it should.
If I introduce this node into the scene, it all goes badly wrong ...
addSphere(x: 6.0, y: 6.0, z: 6.0, radius: radius, textureName: "earth")
I've tried adding a fixed physics body with zero mass to each node, to no avail. It's as if the camera is no longer looking directly at (0,0,0) but is influenced by the nodes in the scene.
I've tried all sorts of permutations when adding nodes, and in some tests it appeared that adding anything with a nonzero z value caused problems.
This is the addSphere method ...
@discardableResult
internal func addSphere(x: Float, y: Float, z: Float, radius: Float, textureName: String) -> SCNNode {
    let sphereGeometry = SCNSphere(radius: CGFloat(radius))
    sphereGeometry.firstMaterial?.diffuse.contents = UIImage(imageLiteralResourceName: textureName)
    let sphereNode = SCNNode(geometry: sphereGeometry)
    sphereNode.name = "dot"
    sphereNode.position = SCNVector3(x: x, y: y, z: z)
    sphereNode.lines = []          // custom property
    sphereNode.ignoreTaps = false  // custom property
    sphereNode.categoryBitMask = NodeBitMasks.dot
    //sphereNode.physicsBody = SCNPhysicsBody(type: .static, shape: nil)
    self.rootNode.addChildNode(sphereNode)
    return sphereNode
}
There are no errors, and as you would expect, a node positioned at (0,0,0) should remain static while the camera rotates around, "looking at" this point.
Any pointers would be really appreciated, as I can't make heads or tails of why it's behaving like this.
I assume you use SCNView.allowsCameraControl = true to move the camera around. This built-in setting of SceneKit is really only for debugging your scene and is not suited for anything where you want dedicated control over your camera movement; it seems to orbit around the center of the scene's overall content rather than a fixed point, so off-center nodes shift the pivot, which would explain the wobble you see.
You should instead implement a camera orbit, see https://stackoverflow.com/a/25674762/3358138.
I could reproduce your problem with the "wobbling" center of your scene, see Playground Gist https://gist.github.com/dirkolbrich/e2c247619b28a287c464abbc0595e23c.
A camera orbit solves this "wobbling" and lets the camera stay on center, see Playground Gist https://gist.github.com/dirkolbrich/9e4dffb3026d0540d6edf6877f27d1e4.
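For reference, the heart of such a camera orbit is just an extra node at the scene center with the camera node as its child. A minimal sketch, assuming an existing scene (the camera distance of 20 and the sample angles are arbitrary):

let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
// A camera looks down its own -z axis, so offsetting it along +z
// makes it face its parent's origin.
cameraNode.position = SCNVector3(x: 0, y: 0, z: 20)

// The orbit node sits exactly at (0,0,0) and never moves.
let cameraOrbit = SCNNode()
cameraOrbit.addChildNode(cameraNode)
scene.rootNode.addChildNode(cameraOrbit)

// Rotate the orbit node (e.g. from a pan gesture) to swing the camera
// around the center; the pivot cannot drift, whatever else is in the scene.
cameraOrbit.eulerAngles.x = -0.3 // pitch, radians
cameraOrbit.eulerAngles.y = -0.5 // yaw, radians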
I need help creating a function to convert three angles (yaw, pitch and roll, in degrees) into six float variables.
How would I go about making a function that outputs these floats?
{0, 0, 0} = {1, 0, 0, -0, -0, 1}
{45, 0, 0} = {0.70710676908493, 0.70710676908493, 0, -0, -0, 1}
{0, 90, 0} = {-4.3711388286738e-08, 0, 1, -1, 0, -4.3711388286738e-08}
{0, 0, 135} = {1, -0, 0, -0, -0.70710676908493, -0.70710676908493}
{180, 180, 0} = {1, -8.7422776573476e-08, 8.7422776573476e-08, 8.7422776573476e-08, 0, -1}
{225, 0, 225} = {-0.70710682868958, 0.5, 0.5, -0, 0.70710670948029, -0.70710682868958}
{270, 270, 270} = {1.4220277639103e-16, -2.3849761277006e-08, 1, 1, 1.1924880638503e-08, 1.42202776319103e-16}
{315, 315, 315} = {0.5, -0.85355341434479, 0.14644680917263, 0.70710688829422, 0.5, 0.5}
More examples, as requested by Egor Skriptunoff:
{10, 20, 30} = {0.92541658878326, -0.018028322607279, 0.37852230668068, -0.34202012419701, -0.46984630823135, 0.81379765272141}
{10, 30, 20} = {0.85286849737167, -0.0052361427806318, 0.52209949493408, -0.5, -0.29619812965393, 0.81379765272141}
{20, 10, 30} = {0.92541658878326, 0.21461015939713, 0.3123245537281, -0.17364817857742, -0.49240386486053, 0.85286849737167}
{20, 30, 10} = {0.81379765272141, 0.25523611903191, 0.52209949493408, -0.5, -0.15038372576237, 0.85286849737167}
{30, 10, 20} = {0.85286849737167, 0.41841205954552, 0.3123245537281, -0.17364817857742, -0.33682405948639, 0.92541658878326}
{30, 20, 10} = {0.81379765272141, 0.4409696161747, 0.37852230668068, -0.34202012419701, -0.16317591071129, 0.92541658878326}
The code I currently have can calculate all of the floats except the 2nd and 3rd.
function convert_rotations(Yaw, Pitch, Roll)
    return {
        math.cos(math.rad(Yaw))*math.cos(math.rad(Pitch)),
        0,
        0,
        math.sin(math.rad(Pitch))*-1,
        math.sin(math.rad(Roll))*math.cos(math.rad(Pitch))*-1,
        math.cos(math.rad(Roll))*math.cos(math.rad(Pitch))
    }
end
I cannot seem to find the correct math for the 2nd and 3rd floats when all angles are nonzero, but I did come up with this:
-- The second float when the Yaw is 0 degrees
math.sin(math.rad(Pitch))*math.sin(math.rad(Roll))*-1
-- The second float when the Pitch is 0 degrees
math.sin(math.rad(Yaw))*math.cos(math.rad(Roll))
-- The second float when the Roll is 0 degrees
math.sin(math.rad(Yaw))*math.sin(math.rad(Pitch))
And for the 3rd float I came up with this:
-- The third float when Yaw is 0 degrees
math.sin(math.rad(Pitch))*math.cos(math.rad(Roll))
-- The third float when Pitch is 0 degrees
math.sin(math.rad(Yaw))*math.sin(math.rad(Roll))
-- The third float when Roll is 0 degrees
math.cos(math.rad(Yaw))*math.sin(math.rad(Pitch))
-- Rotate the vector pair (X, Y) by `alpha` degrees in the plane they span:
-- X becomes X*cos(a) + Y*sin(a), and Y becomes Y*cos(a) - X*sin(a).
local function Rotate(X, Y, alpha)
    local c, s = math.cos(math.rad(alpha)), math.sin(math.rad(alpha))
    local t1, t2, t3 = X[1]*s, X[2]*s, X[3]*s  -- save X*s before X is overwritten
    X[1], X[2], X[3] = X[1]*c+Y[1]*s, X[2]*c+Y[2]*s, X[3]*c+Y[3]*s
    Y[1], Y[2], Y[3] = Y[1]*c-t1, Y[2]*c-t2, Y[3]*c-t3
end

-- Start from the axes F(orward), L(eft), T(op) and apply yaw, pitch, roll in turn.
local function convert_rotations(Yaw, Pitch, Roll)
    local F, L, T = {1,0,0}, {0,1,0}, {0,0,1}
    Rotate(F, L, Yaw)
    Rotate(F, T, Pitch)
    Rotate(T, L, Roll)
    return {F[1], -L[1], -T[1], -F[3], L[3], T[3]}
end
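Multiplying the three rotations out gives closed-form expressions for all six floats. In particular, the 2nd and 3rd values that were missing come out as follows (angles in radians; this is just the expansion of the code above, and it checks out against the sample values in the question):

f1 = cos(Yaw)*cos(Pitch)
f2 = sin(Yaw)*cos(Roll) - cos(Yaw)*sin(Pitch)*sin(Roll)
f3 = cos(Yaw)*sin(Pitch)*cos(Roll) + sin(Yaw)*sin(Roll)
f4 = -sin(Pitch)
f5 = -cos(Pitch)*sin(Roll)
f6 = cos(Pitch)*cos(Roll)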
Can someone please help me with the following code:
gl.viewport(0, 0, gl.viewportWidth, gl.viewportHeight);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
mat4.ortho(pMatrix, 0, gl.viewportWidth, 0, gl.viewportHeight, 0.1, 100);
mat4.identity(mvMatrix);
mat4.lookAt(mvMatrix, [0, 0, -40], [0, 0, 0], [0, 1, 0]);
Full source http://jsfiddle.net/bepa/2QXkp/
I am trying to display a cube with an orthographic camera, but all I see is black. The cube should be at (0, 0, 0), with the camera at (0, 0, -40), looking at (0, 0, 0).
For all matrix transformations I use gl-matrix 2.2.0.
EDIT:
This works fine:
mat4.perspective(pMatrix, 45, gl.viewportWidth / gl.viewportHeight, 0.1, 100.0);
mat4.identity(mvMatrix);
mat4.lookAt(mvMatrix, [0, 40, -40], [0, 0, 0], [0, 1, 0]);
mat4.rotate(mvMatrix, mvMatrix, degToRad(45), [0, 1, 0]);
This doesn't work:
mat4.ortho(pMatrix, 0, gl.viewportWidth, 0, gl.viewportHeight, 0.1, 100);
mat4.identity(mvMatrix);
mat4.lookAt(mvMatrix, [0, 40, -40], [0, 0, 0], [0, 1, 0]);
mat4.rotate(mvMatrix, mvMatrix, degToRad(45), [0, 1, 0]);
mat4.ortho(pMatrix, -1.0, 1.0, -1.0, 1.0, 0.1, 100);
Gives a result that is not black ;)
The documentation of mat4.ortho():
/**
 * Generates a orthogonal projection matrix with the given bounds
 *
 * @param {mat4} out mat4 frustum matrix will be written into
 * @param {number} left Left bound of the frustum
 * @param {number} right Right bound of the frustum
 * @param {number} bottom Bottom bound of the frustum
 * @param {number} top Top bound of the frustum
 * @param {number} near Near bound of the frustum
 * @param {number} far Far bound of the frustum
 * @returns {mat4} out
 */
mat4.ortho = function (out, left, right, bottom, top, near, far) {
The width and height of the canvas are not needed for an ortho projection: the left/right/bottom/top bounds are distances in world space, not pixels, and they define the box of space that gets mapped onto the viewport.
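To add a bit of the why (this is the standard orthographic mapping, nothing specific to gl-matrix): eye-space x is sent to normalized device coordinates as

x_ndc = 2*(x - left)/(right - left) - 1

With left = 0 and right = viewportWidth (hundreds of units), a cube spanning, say, x in [-1, 1] lands in a sliver around x_ndc = -1: at most a couple of pixels at the left edge of the screen, with the x < 0 half clipped away entirely, and the same happens in y. That is why bounds of -1..1, roughly the size of the cube, suddenly show something.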
I have a calibrated camera for which I know the intrinsic and extrinsic data exactly. The height of the camera is also known. Now I want to virtually rotate the camera to get a bird's eye view, so that I can build the homography matrix with the three rotation angles and the translation.
I know that points can be transformed from one image to the other via the plane-induced homography as
x = K * (R - t*n^T/d) * K^-1 * x'
There are a few things I'd like to know now:
If I want to bring an image coordinate back into the camera coordinate system (CCS), I have to multiply it by K^-1, right? And as the image coordinate I use (x', y', 1)?
Then I need to build a rotation matrix for rotating the CCS... but which convention should I use? And how do I know how to set up my WCS?
The next thing is the normal and the distance. Is it right to just take three points lying on the ground and compute the normal from them? And is the distance then the camera height?
Also, I'd like to know how I can change the height of the virtual bird's-eye camera, so that I can say, for example, that I want to see the ground plane from 3 meters height. How can I use the unit "meter" in the translation and in the homography matrix?
So much for now; it would be great if someone could enlighten and help me. And please don't suggest generating the bird's eye view with getPerspectiveTransform; I've already tried that, but that approach is not suitable for me.
Senna
Here is the code I would advise (it's one of mine); to my mind it answers a lot of your questions.
As for the distance, note that it is the dist entry in the translation matrix T below (the translation along the Z axis).
Hope it will help you...
// Assumes the usual OpenCV setup: #include <opencv2/opencv.hpp>,
// using namespace cv; using namespace std;
Mat source = imread("Whatyouwant.jpg");
int alpha_ = 90, beta_ = 90, gamma_ = 90;
int f_ = 500, dist_ = 500;
Mat destination;
string wndname1 = "Source: ";
string wndname2 = "WarpPerspective: ";
string tbarname1 = "Alpha";
string tbarname2 = "Beta";
string tbarname3 = "Gamma";
string tbarname4 = "f";
string tbarname5 = "Distance";
namedWindow(wndname1, 1);
namedWindow(wndname2, 1);
createTrackbar(tbarname1, wndname2, &alpha_, 180);
createTrackbar(tbarname2, wndname2, &beta_, 180);
createTrackbar(tbarname3, wndname2, &gamma_, 180);
createTrackbar(tbarname4, wndname2, &f_, 2000);
createTrackbar(tbarname5, wndname2, &dist_, 2000);
imshow(wndname1, source);
while(true) {
double f, dist;
double alpha, beta, gamma;
alpha = ((double)alpha_ - 90.)*CV_PI/180;
beta = ((double)beta_ - 90.)*CV_PI/180;
gamma = ((double)gamma_ - 90.)*CV_PI/180;
f = (double) f_;
dist = (double) dist_;
Size taille = source.size();
double w = (double)taille.width, h = (double)taille.height;
// Projection 2D -> 3D matrix
Mat A1 = (Mat_<double>(4,3) <<
1, 0, -w/2,
0, 1, -h/2,
0, 0, 0,
0, 0, 1);
// Rotation matrices around the X, Y, Z axes
Mat RX = (Mat_<double>(4, 4) <<
1, 0, 0, 0,
0, cos(alpha), -sin(alpha), 0,
0, sin(alpha), cos(alpha), 0,
0, 0, 0, 1);
Mat RY = (Mat_<double>(4, 4) <<
cos(beta), 0, -sin(beta), 0,
0, 1, 0, 0,
sin(beta), 0, cos(beta), 0,
0, 0, 0, 1);
Mat RZ = (Mat_<double>(4, 4) <<
cos(gamma), -sin(gamma), 0, 0,
sin(gamma), cos(gamma), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
// Composed rotation matrix with (RX,RY,RZ)
Mat R = RX * RY * RZ;
// Translation matrix along the Z axis; changing dist changes the height
Mat T = (Mat_<double>(4, 4) <<
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, dist,
0, 0, 0, 1);
// Camera intrinsics matrix 3D -> 2D
Mat A2 = (Mat_<double>(3,4) <<
f, 0, w/2, 0,
0, f, h/2, 0,
0, 0, 1, 0);
// Final and overall transformation matrix
Mat transfo = A2 * (T * (R * A1));
// Apply matrix transformation
warpPerspective(source, destination, transfo, taille, INTER_CUBIC | WARP_INVERSE_MAP);
imshow(wndname2, destination);
waitKey(30);
}
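To tie this back to the formula in the question (this is the standard plane-induced homography from multiple-view geometry, a sketch of the idea rather than something specific to the code above): for the ground plane with normal n at distance d from the camera, the homography between the real and the virtually rotated camera is

H = K * (R - t*n^T/d) * K^-1

Here d is simply your camera height above the ground, and t must be expressed in the same unit. So if you work in meters everywhere (t in meters, d = camera height in meters), seeing the ground plane from 3 meters is just a matter of choosing the corresponding t; the trackbar code above instead works in pixel-like f and dist units, which is fine for experimenting.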
This code works for me, but I don't know why the roll and pitch angles are exchanged: when I change "alpha" the image is warped in pitch, and when I change "beta" the image is warped in roll. So I changed my rotation matrices, as can be seen below.
Also, RY has a sign error. You can check Ry at http://en.wikipedia.org/wiki/Rotation_matrix.
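For reference, the standard rotation about the Y axis is

Ry(a) = (  cos(a)  0  sin(a)
             0     1    0
          -sin(a)  0  cos(a) )

so the RY in the answer above has the signs of the two sin(beta) terms flipped, i.e. it is the transpose (the inverse rotation).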
The rotation matrices I use:
Mat RX = (Mat_<double>(4, 4) <<
1, 0, 0, 0,
0, cos(beta), -sin(beta), 0,
0, sin(beta), cos(beta), 0,
0, 0, 0, 1);
Mat RY = (Mat_<double>(4, 4) <<
cos(alpha), 0, sin(alpha), 0,
0, 1, 0, 0,
-sin(alpha), 0, cos(alpha), 0,
0, 0, 0, 1);
Mat RZ = (Mat_<double>(4, 4) <<
cos(gamma), -sin(gamma), 0, 0,
sin(gamma), cos(gamma), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
Regards