I have been trying to perform ArUco marker detection in Unity using a custom camera that provides a texture. Since this is not a regular camera, I can't use the WebCamTexture that comes with the package. I have tried everything I can think of, but nothing works for me. I have tested so many things that I'll just post my ArUco code and maybe someone can help me fix it. The code doesn't throw any errors, but it also doesn't detect anything; at a minimum I was expecting the rejected corners to be detected.
using UnityEngine;
using UnityEngine.UI;
using UnityEngine.SceneManagement;
using System.Collections;
using System.Collections.Generic;
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.Calib3dModule;
using OpenCVForUnity.ArucoModule;
using OpenCVForUnity.UnityUtils;
using Leap.Unity;
using Utils = OpenCVForUnity.UnityUtils.Utils;
using OpenCVForUnity.ImgprocModule;
namespace OpenCVForUnityExample
{
/// <summary>
/// ArUco Example
/// An example of marker-based AR view and camera pose estimation using the aruco (ArUco Marker Detection) module.
/// Referring to https://github.com/opencv/opencv_contrib/blob/master/modules/aruco/samples/detect_markers.cpp.
/// http://docs.opencv.org/3.1.0/d5/dae/tutorial_aruco_detection.html
/// </summary>
public class ArUcoExample_LM : MonoBehaviour
{
/// <summary>
/// The image texture.
/// </summary>
public Texture2D imgTexture;
[Space (10)]
/// <summary>
/// The dictionary identifier.
/// </summary>
public ArUcoDictionary dictionaryId = ArUcoDictionary.DICT_6X6_250;
/// <summary>
/// The dictionary id dropdown.
/// </summary>
public Dropdown dictionaryIdDropdown;
/// <summary>
/// Determines if shows rejected corners.
/// </summary>
public bool showRejectedCorners = false;
/// <summary>
/// The shows rejected corners toggle.
/// </summary>
public Toggle showRejectedCornersToggle;
/// <summary>
/// Determines if applied the pose estimation.
/// </summary>
public bool applyEstimationPose = true;
/// <summary>
/// The length of the markers' side. Normally, unit is meters.
/// </summary>
public float markerLength = 0.1f;
/// <summary>
/// The AR game object.
/// </summary>
public GameObject arGameObject;
/// <summary>
/// The AR camera.
/// </summary>
public Camera arCamera;
[Space (10)]
/// <summary>
/// Determines if request the AR camera moving.
/// </summary>
public bool shouldMoveARCamera = false;
/// <summary>
/// The rgb mat.
/// </summary>
Mat rgbMat;
Mat ids;
List<Mat> corners;
List<Mat> rejectedCorners;
Mat rvecs;
Mat tvecs;
Mat rotMat;
Mat gray;
Mat bw;
DetectorParameters detectorParams;
Dictionary dictionary;
/// <summary>
/// The texture.
/// </summary>
public Texture2D texture;
public LM_Texture2DToMat_ZED LMD;
Mat hierarchy;
List<MatOfPoint> contours;
// Use this for initialization
void Start ()
{
ids = new Mat();
corners = new List<Mat>();
rejectedCorners = new List<Mat>();
rvecs = new Mat();
tvecs = new Mat();
rotMat = new Mat(3, 3, CvType.CV_64FC1);
detectorParams = DetectorParameters.create();
dictionary = Aruco.getPredefinedDictionary((int)dictionaryId);
///////////////////
///
hierarchy = new Mat();
contours = new List<MatOfPoint>();
/////////////////////////////////////
}
// Update is called once per frame
void Update()
{
imgTexture = LMD.myCroppedTex2d;
if (imgTexture != null)
{
//gameObject.GetComponent<Renderer>().material.mainTexture = imgTexture; //THIS WAS JUST TO VALIDATE THE TEXTURE WAS COMING THROUGH PROPERLY.
if (rgbMat == null)
{
//rgbMat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3);
rgbMat = LMD.outputMAT;
rgbMat.convertTo(rgbMat, CvType.CV_8UC3);
Debug.LogWarning("MAT INFO: "+rgbMat+" COLS: "+rgbMat.cols());
texture = new Texture2D(rgbMat.cols(), rgbMat.rows(), TextureFormat.RGBA32, false);
}
dictionaryIdDropdown.value = (int)dictionaryId;
showRejectedCornersToggle.isOn = showRejectedCorners;
DetectMarkers();
}
}
private void DetectMarkers ()
{
Utils.texture2DToMat (imgTexture, rgbMat); // <-- does not work, the image breaks from here.
//Debug.Log ("imgMat dst ToString " + rgbMat.ToString ());
/////
//rgbMat = LMD.outputMAT;
//rgbMat.convertTo(rgbMat, CvType.CV_32SC2);
//rgbMat.convertTo(rgbMat, CvType.CV_8UC1);
//Debug.Log("imgMat dst ToString " + rgbMat.ToString());
//Utils.matToTexture2D(rgbMat, texture); // <-- if you disable lines 121, 295 and 297 and comment these 3 lines back in, you get a blinking image.
//gameObject.GetComponent<Renderer>().material.mainTexture = texture;
/////
//gameObject.transform.localScale = new Vector3 (imgTexture.width, imgTexture.height, 1);
//Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);
float width = rgbMat.width ();
float height = rgbMat.height ();
float imageSizeScale = 1.0f;
float widthScale = (float)Screen.width / width;
float heightScale = (float)Screen.height / height;
if (widthScale < heightScale) {
Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
imageSizeScale = (float)Screen.height / (float)Screen.width;
} else {
Camera.main.orthographicSize = height / 2;
}
// set camera parameters.
int max_d = (int)Mathf.Max (width, height);
double fx = max_d;
double fy = max_d;
double cx = width / 2.0f;
double cy = height / 2.0f;
Mat camMatrix = new Mat (3, 3, CvType.CV_64FC1);
camMatrix.put (0, 0, fx);
camMatrix.put (0, 1, 0);
camMatrix.put (0, 2, cx);
camMatrix.put (1, 0, 0);
camMatrix.put (1, 1, fy);
camMatrix.put (1, 2, cy);
camMatrix.put (2, 0, 0);
camMatrix.put (2, 1, 0);
camMatrix.put (2, 2, 1.0f);
//Debug.Log ("camMatrix " + camMatrix.dump ()); //Dont care about this right now
MatOfDouble distCoeffs = new MatOfDouble (0, 0, 0, 0);
//Debug.Log ("distCoeffs " + distCoeffs.dump ()); //Dont care about this right now
// calibration camera matrix values.
Size imageSize = new Size (width * imageSizeScale, height * imageSizeScale);
double apertureWidth = 0;
double apertureHeight = 0;
double[] fovx = new double[1];
double[] fovy = new double[1];
double[] focalLength = new double[1];
Point principalPoint = new Point (0, 0);
double[] aspectratio = new double[1];
Calib3d.calibrationMatrixValues (camMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectratio);
//Debug.Log ("imageSize " + imageSize.ToString ());
//Debug.Log ("apertureWidth " + apertureWidth);
//Debug.Log ("apertureHeight " + apertureHeight);
//Debug.Log ("fovx " + fovx [0]);
//Debug.Log ("fovy " + fovy [0]);
//Debug.Log ("focalLength " + focalLength [0]);
//Debug.Log ("principalPoint " + principalPoint.ToString ());
//Debug.Log ("aspectratio " + aspectratio [0]);
// To convert the difference of the FOV value of the OpenCV and Unity.
double fovXScale = (2.0 * Mathf.Atan ((float)(imageSize.width / (2.0 * fx)))) / (Mathf.Atan2 ((float)cx, (float)fx) + Mathf.Atan2 ((float)(imageSize.width - cx), (float)fx));
double fovYScale = (2.0 * Mathf.Atan ((float)(imageSize.height / (2.0 * fy)))) / (Mathf.Atan2 ((float)cy, (float)fy) + Mathf.Atan2 ((float)(imageSize.height - cy), (float)fy));
//Debug.Log ("fovXScale " + fovXScale); //Dont care about this right now
//Debug.Log ("fovYScale " + fovYScale); //Dont care about this right now
// Adjust Unity Camera FOV https://github.com/opencv/opencv/commit/8ed1945ccd52501f5ab22bdec6aa1f91f1e2cfd4
if (widthScale < heightScale) {
arCamera.fieldOfView = (float)(fovx [0] * fovXScale);
} else {
arCamera.fieldOfView = (float)(fovy [0] * fovYScale);
}
// Display objects near the camera.
arCamera.nearClipPlane = 0.01f;
//Debug.Log("RGBMAT " + rgbMat);
//Debug.Log("Dictionary " + dictionary);
//Debug.Log("corners " + corners);
//Debug.Log("ids " + ids);
//Debug.Log("detectorParams " + detectorParams);
//Debug.Log("rejectedCorners " + rejectedCorners);
//Debug.Log("camMatrix " + camMatrix);
//Debug.Log("distCoeffs " + distCoeffs);
/////////////////////////////
if (gray == null)
{
gray = new Mat();
}
Imgproc.cvtColor(rgbMat, gray, Imgproc.COLOR_BGR2GRAY);
// Convert image to binary
if (bw == null)
{
bw = new Mat();
}
Imgproc.threshold(gray, bw, 50, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
/////////////////////////////
///
// detect markers. ////////////////////////////////////// if you enable it breaks
Aruco.detectMarkers (gray, dictionary, corners, ids, detectorParams, rejectedCorners, camMatrix, distCoeffs);
// if at least one marker detected
if (ids.total () > 0) {
Debug.Log("some ids");
Aruco.drawDetectedMarkers (rgbMat, corners, ids, new Scalar (0, 255, 0));
// estimate pose.
if (applyEstimationPose) {
Debug.Log("this is progress");
Aruco.estimatePoseSingleMarkers (corners, markerLength, camMatrix, distCoeffs, rvecs, tvecs);
for (int i = 0; i < ids.total (); i++) {
using (Mat rvec = new Mat (rvecs, new OpenCVForUnity.CoreModule.Rect (0, i, 1, 1)))
using (Mat tvec = new Mat (tvecs, new OpenCVForUnity.CoreModule.Rect (0, i, 1, 1))) {
// In this example we are processing with RGB color image, so Axis-color correspondences are X: blue, Y: green, Z: red. (Usually X: red, Y: green, Z: blue)
Calib3d.drawFrameAxes(rgbMat, camMatrix, distCoeffs, rvec, tvec, markerLength * 0.5f);
}
// This example can display the ARObject on only first detected marker.
if (i == 0) {
// Get translation vector
double[] tvecArr = tvecs.get (i, 0);
// Get rotation vector
double[] rvecArr = rvecs.get (i, 0);
Mat rvec = new Mat (3, 1, CvType.CV_64FC1);
rvec.put (0, 0, rvecArr);
// Convert rotation vector to rotation matrix.
Calib3d.Rodrigues (rvec, rotMat);
double[] rotMatArr = new double[rotMat.total ()];
rotMat.get (0, 0, rotMatArr);
// Convert OpenCV camera extrinsic parameters to Unity Matrix4x4.
Matrix4x4 transformationM = new Matrix4x4 (); // from OpenCV
transformationM.SetRow (0, new Vector4 ((float)rotMatArr [0], (float)rotMatArr [1], (float)rotMatArr [2], (float)tvecArr [0]));
transformationM.SetRow (1, new Vector4 ((float)rotMatArr [3], (float)rotMatArr [4], (float)rotMatArr [5], (float)tvecArr [1]));
transformationM.SetRow (2, new Vector4 ((float)rotMatArr [6], (float)rotMatArr [7], (float)rotMatArr [8], (float)tvecArr [2]));
transformationM.SetRow (3, new Vector4 (0, 0, 0, 1));
Debug.Log ("transformationM " + transformationM.ToString ());
Matrix4x4 invertYM = Matrix4x4.TRS (Vector3.zero, Quaternion.identity, new Vector3 (1, -1, 1));
Debug.Log ("invertYM " + invertYM.ToString ());
// right-handed coordinates system (OpenCV) to left-handed one (Unity)
// https://stackoverflow.com/questions/30234945/change-handedness-of-a-row-major-4x4-transformation-matrix
Matrix4x4 ARM = invertYM * transformationM * invertYM;
if (shouldMoveARCamera) {
ARM = arGameObject.transform.localToWorldMatrix * ARM.inverse;
Debug.Log ("ARM " + ARM.ToString ());
ARUtils.SetTransformFromMatrix (arCamera.transform, ref ARM);
} else {
ARM = arCamera.transform.localToWorldMatrix * ARM;
Debug.Log ("ARM " + ARM.ToString ());
ARUtils.SetTransformFromMatrix (arGameObject.transform, ref ARM);
}
}
}
}
}
if (showRejectedCorners && rejectedCorners.Count > 0) {
Debug.Log("Show Rejected Corners");
Aruco.drawDetectedMarkers (rgbMat, rejectedCorners, new Mat (), new Scalar (255, 0, 0));
}
Utils.matToTexture2D (rgbMat, texture);
gameObject.GetComponent<Renderer>().material.mainTexture = texture;
}
private void ResetObjectTransform ()
{
// reset AR object transform.
Matrix4x4 i = Matrix4x4.identity;
ARUtils.SetTransformFromMatrix (arCamera.transform, ref i);
ARUtils.SetTransformFromMatrix (arGameObject.transform, ref i);
}
/// <summary>
/// Raises the destroy event.
/// </summary>
void OnDestroy ()
{
if (rgbMat != null)
rgbMat.Dispose ();
}
/// <summary>
/// Raises the back button click event.
/// </summary>
public void OnBackButtonClick ()
{
SceneManager.LoadScene ("OpenCVForUnityExample");
}
/// <summary>
/// Raises the dictionary id dropdown value changed event.
/// </summary>
public void OnDictionaryIdDropdownValueChanged (int result)
{
if ((int)dictionaryId != result) {
dictionaryId = (ArUcoDictionary)result;
ResetObjectTransform ();
DetectMarkers ();
}
}
/// <summary>
/// Raises the show rejected corners toggle value changed event.
/// </summary>
public void OnShowRejectedCornersToggleValueChanged ()
{
if (showRejectedCorners != showRejectedCornersToggle.isOn) {
showRejectedCorners = showRejectedCornersToggle.isOn;
ResetObjectTransform ();
DetectMarkers ();
}
}
public enum ArUcoDictionary
{
DICT_4X4_50 = Aruco.DICT_4X4_50,
DICT_4X4_100 = Aruco.DICT_4X4_100,
DICT_4X4_250 = Aruco.DICT_4X4_250,
DICT_4X4_1000 = Aruco.DICT_4X4_1000,
DICT_5X5_50 = Aruco.DICT_5X5_50,
DICT_5X5_100 = Aruco.DICT_5X5_100,
DICT_5X5_250 = Aruco.DICT_5X5_250,
DICT_5X5_1000 = Aruco.DICT_5X5_1000,
DICT_6X6_50 = Aruco.DICT_6X6_50,
DICT_6X6_100 = Aruco.DICT_6X6_100,
DICT_6X6_250 = Aruco.DICT_6X6_250,
DICT_6X6_1000 = Aruco.DICT_6X6_1000,
DICT_7X7_50 = Aruco.DICT_7X7_50,
DICT_7X7_100 = Aruco.DICT_7X7_100,
DICT_7X7_250 = Aruco.DICT_7X7_250,
DICT_7X7_1000 = Aruco.DICT_7X7_1000,
DICT_ARUCO_ORIGINAL = Aruco.DICT_ARUCO_ORIGINAL,
}
}
}
The problem with the code was that the images were coming in as an Alpha8 texture, while OpenCV works with RGB textures, so when I converted to RGB the alpha channel was being stripped from the image, and that channel was pretty much the only content in it.
The solution was posted on this other thread:
Solution to change format from Alpha8 to RGBA texture.
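For anyone who hits the same issue, below is a minimal sketch of that kind of Alpha8-to-RGBA conversion (assuming the incoming Texture2D is CPU-readable and that the useful data lives only in the alpha channel; the class and method names here are just illustrative):
using UnityEngine;
public static class Alpha8Converter
{
    // Copy the alpha channel of an Alpha8 texture into the RGB channels of a new
    // RGBA32 texture, so OpenCV sees the actual image content instead of an empty RGB image.
    public static Texture2D ToRGBA32(Texture2D alphaTex)
    {
        Color32[] src = alphaTex.GetPixels32();   // requires a readable texture
        Color32[] dst = new Color32[src.Length];
        for (int i = 0; i < src.Length; i++)
        {
            byte a = src[i].a;                    // the only meaningful channel in Alpha8
            dst[i] = new Color32(a, a, a, 255);   // replicate it into RGB
        }
        Texture2D rgbaTex = new Texture2D(alphaTex.width, alphaTex.height, TextureFormat.RGBA32, false);
        rgbaTex.SetPixels32(dst);
        rgbaTex.Apply();
        return rgbaTex;                           // this texture can then be passed to Utils.texture2DToMat
    }
}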
At the moment I am working on a project that searches for a sample in a video stream and constantly checks it.
But after a while (sometimes a few seconds, sometimes a few minutes) it just stops working. No detection, no video stream.
I checked my processor and also tried different resolutions, but it's still not working correctly.
#include "pch.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <array>
#include <stdio.h>
#include "opencv2/core/mat.hpp"
using namespace std;
using namespace cv;
/// Create a flag, used for nearly everything
int flag = 0;
int i = 0;
/// BlackPixel counter
float blackPixel;
/// Localizing the best match with minMaxLoc
double minVal, maxVal;
Point minLoc, maxLoc, matchLoc;
/// Points for drawMatch
Point middle_x, middle_y;
/// Creating picture Matrix
Mat image_input, image_template, image_display, image_object;
/// Create the result matrix
int result_cols, result_rows;
/// Test Inputs
Mat pictureTest;
Mat objectTest;
string intToString(int number) {
std::stringstream ss;
ss << number;
return ss.str();
}
void drawMatch(Mat object, Mat scene, vector<Point> match_centers)
{
for (size_t i = 0; i < match_centers.size(); i++)
{
/// middle of template at X
middle_x = Point((object.cols / 2) + match_centers[i].x, match_centers[i].y);
/// middle of template at Y
middle_y = Point(match_centers[i].x, match_centers[i].y + (object.rows / 2));
/// Draw a rectangle around the match
rectangle(scene, Point(match_centers[i].x, match_centers[i].y), Point(match_centers[i].x + object.cols, match_centers[i].y + object.rows), Scalar(0, 255, 0), 2);
/// Show the tracking result
namedWindow("Track", WINDOW_NORMAL);
imshow("Track", scene);
/// Print the coordinates of the match in the image
//putText(scene, "(" + intToString(match_centers[i].x) + "," + intToString(match_centers[i].y) + ")", Point(match_centers[i].x - 40, match_centers[i].y - 15), 1, 1, Scalar(255, 0, 0));
}
}
vector<Point> imageComparetion(Mat object, Mat scene, int match_method, float peek_percent) {
scene.copyTo(image_display);
object.copyTo(image_object);
result_cols = scene.cols - object.cols + 1;
result_rows = scene.rows - object.rows + 1;
Mat result(result_cols, result_rows, CV_32FC1);
/// match scene with template
matchTemplate(scene, object, result, match_method);
///normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());
normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if (match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED)
{
matchLoc = minLoc;
threshold(result, result, 0.1, 1, THRESH_BINARY_INV);
}
else
{
matchLoc = maxLoc;
threshold(result, result, 0.9, 1, THRESH_TOZERO);
}
vector<Point> res;
maxVal = 1.f;
Mat input_matrix = image_display; //webcam image into matrix
Mat match = Mat(input_matrix.size(), input_matrix.type(), Scalar::all(0));
while (maxVal > peek_percent) {
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
if (maxVal > peek_percent) {
Rect r1(Point(maxLoc.x - object.cols / 2, maxLoc.y + object.rows / 2), Point(maxLoc.x + object.cols / 2, maxLoc.y - object.rows / 2));
rectangle(result, Point(maxLoc.x - object.cols / 2, maxLoc.y - object.rows / 2), Point(maxLoc.x + object.cols / 2, maxLoc.y + object.rows / 2), Scalar::all(0), -1);
res.push_back(maxLoc);
}
}
return res;
}
int main(int argc, char** argv) {
///open Cam
VideoCapture cap(1);
cap.set(CAP_PROP_FRAME_WIDTH, 1920);
cap.set(CAP_PROP_FRAME_HEIGHT, 1080);
///Check if cam is open
if (!cap.isOpened()) {
cout << "\n Please check the connection to the camera! \n";
return 0;
}
///Read the matches template
image_template = imread("C:/Users/Ceraxes/Pictures/glaspol.jpg");
Mat current = imread("C:/Users/Ceraxes/Pictures/glaspol.jpg");
cap >> image_input;
///find location of template in stream
vector<Point> match_centers = imageComparetion(image_template, image_input, TM_CCOEFF_NORMED, 0.3); // template, picture, matching method, threshold (peak percent)
/// shows the found template
drawMatch(image_template, image_input, match_centers);
waitKey(1);
///build window
namedWindow("IDS", WINDOW_NORMAL);
Mat detect = image_input; //img_display
Mat draw = Mat(detect.size(), detect.type(), Scalar::all(0));
while (true) {
cap >> detect;
///Show image from camera
imshow("IDS", detect);
/// check every found match
while (i < match_centers.size()) {
///Extract the found matches from the picture
Rect r3(Point(match_centers[i].x, match_centers[i].y), Point(match_centers[i].x + image_template.cols, match_centers[i].y + image_template.rows));
///compares the camera image with the template
absdiff(detect(r3), current, pictureTest);
///counts the differences
blackPixel = countNonZero(pictureTest == 150);
cout << blackPixel << "\n";
i++;
}
i = 0;
waitKey(1);
}
return 0;
}
I'm using an IDS uEye camera, OpenCV 4 and Visual Studio 2017.
Does anyone have an idea why the algorithm slows down?
I'm trying to create a scrolling image that wraps around a canvas to follow its own tail. I've been trying to use PixelWriters and Readers to save off the vertical pixel lines that are scrolling off the screen to the west, and append these to a new image which should grow on the RHS (east) of the screen.
It scrolls, but that's all that's happening. I don't understand how to calculate the scanlines, so apologies for this part.
Any help appreciated.
package controller;
import javafx.animation.AnimationTimer;
import javafx.scene.canvas.Canvas;
import javafx.scene.canvas.GraphicsContext;
import javafx.scene.image.*;
import javafx.scene.layout.*;
import util.GraphicsUtils;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.nio.file.Paths;
class ImageContainer extends HBox {
int w, h;
int translatedAmount = 0;
Image image;
Canvas canvas;
long startNanoTime = System.nanoTime();
WritableImage eastImage = null;
public ImageContainer() {
setVisible(true);
load();
w = (int) image.getWidth();
h = (int) image.getHeight();
canvas = new Canvas(w, h);
int edgeX = (int) canvas.getWidth(); //You can set this a little west for visibility sake...whilst debugging
getChildren().addAll(canvas);
GraphicsContext gc = canvas.getGraphicsContext2D();
canvas.setVisible(true);
gc.drawImage(image, 0, 0, w, h);
setPrefSize(w, h);
eastImage = new WritableImage(translatedAmount+1, h); //create a new eastImage
new AnimationTimer() {
public void handle(long currentNanoTime) {
if (((System.nanoTime() - startNanoTime) / 1000000000.0) < 0.05) {
return;
} else {
startNanoTime = System.nanoTime();
}
translatedAmount++;
Image westLine = getSubImageRectangle(image, 1, 0, 1, h); //get a 1 pixel strip from west of main image
PixelReader westLinepixelReader = westLine.getPixelReader(); //create a pixel reader for this image
byte[] westLinePixelBuffer = new byte[1 * h * 4]; //create a buffer to store the pixels collected from the about to vanish westLine
westLinepixelReader.getPixels(0, 0, 1, h, PixelFormat.getByteBgraInstance(), westLinePixelBuffer, 0, 4); //collect the pixels from westLine strip
Image tempImg = eastImage; //save away the current east side image
byte[] tempBuffer = new byte[(int)tempImg.getWidth() * h * 4];
PixelReader tempImagePixelReader = tempImg.getPixelReader(); //create a pixel reader for our temp copy of the east side image
tempImagePixelReader.getPixels(0, 0, (int)tempImg.getWidth(), h, PixelFormat.getByteBgraInstance(), tempBuffer, 0, 4); //save the tempImage into the tempBuffer
eastImage = new WritableImage(translatedAmount+1, h); //create a new eastImage, but one size larger
PixelWriter eastImagePixelWriter = eastImage.getPixelWriter(); //create a pixel writer for this new east side image
eastImagePixelWriter.setPixels(1, 0, (int)tempImg.getWidth(), h, PixelFormat.getByteBgraInstance(), tempBuffer, 0, 4); //copy the temp image in at x=1
eastImagePixelWriter.setPixels((int)tempImg.getWidth(), 0, 1, h, PixelFormat.getByteBgraInstance(), westLinePixelBuffer, 0, 4); //copy the westLine at x=tempImg.width
image = getSubImageRectangle(image, 1, 0, (int) image.getWidth() - 1, h);
gc.drawImage(image, 0, 0); //draw main image
System.out.println(edgeX-eastImage.getWidth());
gc.drawImage(eastImage, edgeX-eastImage.getWidth(), 0); //add lost image lines
}
}.start();
}
public void load() {
Path imagePath = Paths.get("./src/main/resources/ribbonImages/clouds.png");
File f = imagePath.toFile();
assert f.exists();
image = new Image(f.toURI().toString());
}
public Image getSubImageRectangle(Image image, int x, int y, int w, int h) {
PixelReader pixelReader = image.getPixelReader();
WritableImage newImage = new WritableImage(pixelReader, x, y, w, h);
ImageView imageView = new ImageView();
imageView.setImage(newImage);
return newImage;
}
}
Why make this more difficult than necessary? Simply draw the image to the Canvas twice:
public static void drawImage(Canvas canvas, Image sourceImage, double offset, double wrapWidth) {
GraphicsContext gc = canvas.getGraphicsContext2D();
gc.clearRect(0, 0, canvas.getWidth(), canvas.getHeight());
// make |offset| < wrapWidth
offset %= wrapWidth;
if (offset < 0) {
// wrap negative offsets back into [0, wrapWidth) so the second copy
// of the image still covers the left edge of the canvas
offset += wrapWidth;
}
gc.drawImage(sourceImage, -offset, 0);
gc.drawImage(sourceImage, wrapWidth - offset, 0);
}
@Override
public void start(Stage primaryStage) {
Image image = new Image("https://upload.wikimedia.org/wikipedia/commons/thumb/e/ec/Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg/402px-Mona_Lisa%2C_by_Leonardo_da_Vinci%2C_from_C2RMF_retouched.jpg");
Canvas canvas = new Canvas(image.getWidth(), image.getHeight());
primaryStage.setResizable(false);
Scene scene = new Scene(new Group(canvas));
DoubleProperty offset = new SimpleDoubleProperty();
offset.addListener((observable, oldOffset, newOffset) -> drawImage(canvas, image, newOffset.doubleValue(), canvas.getWidth()));
Timeline timeline = new Timeline(
new KeyFrame(Duration.ZERO, new KeyValue(offset, 0, Interpolator.LINEAR)),
new KeyFrame(Duration.seconds(10), new KeyValue(offset, image.getWidth()*2, Interpolator.LINEAR))
);
timeline.setCycleCount(Animation.INDEFINITE);
timeline.play();
primaryStage.setScene(scene);
primaryStage.sizeToScene();
primaryStage.show();
}
I am trying to denoise this image to get better edges.
I've tried bilateralFilter, GaussianBlur, morphological close and several thresholds, but every time I get an image like:
and when I do the HoughLinesP with dilation of the edges the result is really bad.
Can someone help me improve this? Is there some way to take out that noise?
First try: using GaussianBlur. In this case, I must use equalizeHist or I can't get edges even if I use a really low threshold.
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgGrayscale = new Mat();
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.cvtColor(image, imgGrayscale, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(imgGrayscale, imgGrayscale);
Imgproc.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 1.8);
Photo.fastNlMeansDenoising(imgBlurred, imgBlurred);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 80);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
Second try: using bilateral filter:
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.bilateralFilter(image, imgBlurred, 10, 35, 35);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 120);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
As suggested, I am trying to use the OpenCV contrib modules, using StructuredEdgeDetection. I am testing with a fixed image.
First I compiled OpenCV with contrib.
Second, I wrote the C++ code:
JNIEXPORT jobject JNICALL Java_vi_pdfscanner_main_ScannerEngine_getRandomFlorest(JNIEnv *env, jobject thiz) {
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1);
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // when I run this convertTo, I get an all-black image, and that way I get no edges.
const String model = "/storage/emulated/0/Resp/model.yml.gz";
Ptr<cv::ximgproc::StructuredEdgeDetection> pDollar = cv::ximgproc::createStructuredEdgeDetection(model);
Mat edges;
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "chamando edges");
pDollar->detectEdges(fsrc, edges);
imwrite( "/storage/emulated/0/Resp/edges.jpg", edges);
jclass java_bitmap_class = (jclass)env->FindClass("android/graphics/Bitmap");
jmethodID mid = env->GetMethodID(java_bitmap_class, "getConfig", "()Landroid/graphics/Bitmap$Config;");
jobject bitmap_config = env->CallObjectMethod(bitmap, mid);
jobject _bitmap = mat_to_bitmap(env,edges,false,bitmap_config);
return _bitmap;
}
and I wrote this Java wrapper:
public class ScannerEngine {
private static ScannerEngine ourInstance = new ScannerEngine();
public static ScannerEngine getInstance() {
return ourInstance;
}
private ScannerEngine() {
}
public native Bitmap getRandomFlorest(Bitmap bitmap);
static {
System.loadLibrary("opencv_java3");
System.loadLibrary("Scanner");
}
}
The point is, when I run these lines
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1); //image is ok
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // now the image is all black, does anyone have an idea why?
Thanks very much!
The results above are strong, like this:
Original Image:
http://prntscr.com/cyd8qi
Edges Image:
http://prntscr.com/cyd9ax
It runs on Android 4.4 (API level 19) on a really old device.
That's all, thank you very much.
I am working on a Processing project, but when I try to record the sketch with the GSVideo library I get this error:
A library used by this sketch is not installed properly.
GSVideo version: 1.0.0
A library relies on native code that's not available.
Or only works properly when the sketch is run as a 64-bit application.
In my project I'm tracking objects with the HSV color space and the OpenCV for Processing library, and I want to record the sketch just so I can show my work later. This is my code:
/**
* HSVColorTracking
* Greg Borenstein
* https://github.com/atduskgreg/opencv-processing-book/blob/master/code/hsv_color_tracking/HSVColorTracking/HSVColorTracking.pde
*
* Modified by Jordi Tost #jorditost (color selection)
* University of Applied Sciences Potsdam, 2014
*
* Modified by Luz Alejandra Magre
* Universidad Tecnológica de Bolívar, 2015
*/
import gab.opencv.*;
import processing.video.*;
import codeanticode.gsvideo.*;
import java.awt.Rectangle;
GSCapture video;
OpenCV opencv;
GSMovieMaker mm;
int fps = 30;
PImage src, colorFilteredImage;
ArrayList<Contour> contours;
// <1> Set the range of Hue values for our filter
int rangeLow = 20;
int rangeHigh = 35;
void setup() {
frameRate(fps);
String[] cameras = GSCapture.list();
size(2*opencv.width, opencv.height, P2D);
if (cameras.length == 0)
{
println("There are no cameras available for capture.");
exit();
}
else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
video = new GSCapture(this, 640, 480, cameras[0]);
video.start();
opencv = new OpenCV(this, video.width, video.height);
contours = new ArrayList<Contour>();
mm = new GSMovieMaker(this, width, height, "Test.ogg", GSMovieMaker.THEORA, GSMovieMaker.BEST, fps);
mm.setQueueSize(50, 10);
mm.start();
}
}
void draw() {
// Read last captured frame
if (video.available()) {
video.read();
}
// <2> Load the new frame of our movie in to OpenCV
opencv.loadImage(video);
// Tell OpenCV to use color information
opencv.useColor();
src = opencv.getSnapshot();
// <3> Tell OpenCV to work in HSV color space.
opencv.useColor(HSB);
// <4> Copy the Hue channel of our image into
// the gray channel, which we process.
opencv.setGray(opencv.getH().clone());
// <5> Filter the image based on the range of
// hue values that match the object we want to track.
opencv.inRange(rangeLow, rangeHigh);
// <6> Get the processed image for reference.
colorFilteredImage = opencv.getSnapshot();
///////////////////////////////////////////
// We could process our image here!
// See ImageFiltering.pde
///////////////////////////////////////////
// <7> Find contours in our range image.
// Passing 'true' sorts them by descending area.
contours = opencv.findContours(true, true);
// <8> Display background images
image(src, 0, 0);
image(colorFilteredImage, src.width, 0);
// <9> Check to make sure we've found any contours
if (contours.size() > 0) {
// <9> Get the first contour, which will be the largest one
Contour biggestContour = contours.get(0);
// <10> Find the bounding box of the largest contour,
// and hence our object.
Rectangle r = biggestContour.getBoundingBox();
// <11> Draw the bounding box of our object
noFill();
strokeWeight(2);
stroke(255, 0, 0);
rect(r.x, r.y, r.width, r.height);
// <12> Draw a dot in the middle of the bounding box, on the object.
noStroke();
fill(255, 0, 0);
ellipse(r.x + r.width/2, r.y + r.height/2, 30, 30);
text(r.x + r.width/2, 50, 50);
text(r.y + r.height/2, 50, 80);
}
loadPixels();
mm.addFrame(pixels);
saveFrame("frame-######.png");
}
void mousePressed() {
color c = get(mouseX, mouseY);
println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c));
int hue = int(map(hue(c), 0, 255, 0, 180));
println("hue to detect: " + hue);
rangeLow = hue - 5;
rangeHigh = hue + 5;
}
void keyPressed() {
if (key == ' ') {
// Finish the movie if space bar is pressed
mm.finish();
// Quit running the sketch once the file is written
exit();
}
}
I would really appreciate the help on this.
I recently started learning XNA and am trying to create a room using vertices. I went and tried out Riemer's XNA tutorial and learned a lot, but I can't seem to make my camera work well: every time I move left or right, some of my image or texture seems to disappear and reappear. Please help.
Here is my code.
public struct MyOwnVertexFormat
{
public Vector3 position;
private Vector2 texCoord;
public MyOwnVertexFormat(Vector3 position, Vector2 texCoord)
{
this.position = position;
this.texCoord = texCoord;
}
public readonly static VertexDeclaration VertexDeclaration = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0)
);
}
public class Game1 : Microsoft.Xna.Framework.Game
{
GraphicsDeviceManager graphics;
GraphicsDevice device;
Effect effect;
Matrix viewMatrix;
Matrix projectionMatrix;
VertexBuffer vertexBuffer;
Vector3 cameraPos;
Texture2D streetTexture;
private Vector3 Position = Vector3.One;
private float Zoom = 2500;
private float RotationY = 0.0f;
private float RotationX = 0.0f;
private Matrix gameWorldRotation;
public Game1()
{
graphics = new GraphicsDeviceManager(this);
Content.RootDirectory = "Content";
}
protected override void Initialize()
{
graphics.PreferredBackBufferWidth =1024;
graphics.PreferredBackBufferHeight = 768;
graphics.IsFullScreen = false;
graphics.ApplyChanges();
base.Initialize();
}
protected override void LoadContent()
{
device = GraphicsDevice;
effect = Content.Load<Effect>("OurHLSLfile"); SetUpVertices();
SetUpCamera();
streetTexture = Content.Load<Texture2D>("streettexture");
}
private void UpdateKeyboard()
{
if (Keyboard.GetState().IsKeyDown(Keys.Escape))
Exit();
if (Keyboard.GetState().IsKeyDown(Keys.Up))
RotationX += 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Down))
RotationX -= 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Left))
RotationY += 1.0f;
if (Keyboard.GetState().IsKeyDown(Keys.Right))
RotationY -= 1.0f;
gameWorldRotation =
Matrix.CreateRotationX(MathHelper.ToRadians(RotationX)) *
Matrix.CreateRotationY(MathHelper.ToRadians(RotationY));
}
private void SetUpVertices()
{
MyOwnVertexFormat[] vertices = new MyOwnVertexFormat[12];
vertices[0] = new MyOwnVertexFormat(new Vector3(-20, 0, 10), new Vector2(-0.25f, 25.0f));
vertices[1] = new MyOwnVertexFormat(new Vector3(-20, 0, -100), new Vector2(-0.25f, 0.0f));
vertices[2] = new MyOwnVertexFormat(new Vector3(2, 0, 10), new Vector2(0.25f, 25.0f));
vertices[3] = new MyOwnVertexFormat(new Vector3(2, 0, -100), new Vector2(0.25f, 0.0f));
vertices[4] = new MyOwnVertexFormat(new Vector3(2, 1, 10), new Vector2(0.375f, 25.0f));
vertices[5] = new MyOwnVertexFormat(new Vector3(2, 1, -100), new Vector2(0.375f, 0.0f));
vertices[6] = new MyOwnVertexFormat(new Vector3(3, 1, 10), new Vector2(0.5f, 25.0f));
vertices[7] = new MyOwnVertexFormat(new Vector3(3, 1, -100), new Vector2(0.5f, 0.0f));
vertices[8] = new MyOwnVertexFormat(new Vector3(-13, 1, 10), new Vector2(0.75f, 25.0f));
vertices[9] = new MyOwnVertexFormat(new Vector3(-13, 1, -100), new Vector2(0.75f, 0.0f));
vertices[10] = new MyOwnVertexFormat(new Vector3(-13, 21, 10), new Vector2(1.25f, 25.0f));
vertices[11] = new MyOwnVertexFormat(new Vector3(-13, 21, -100), new Vector2(1.25f, 0.0f));
vertexBuffer = new VertexBuffer(device, MyOwnVertexFormat.VertexDeclaration, vertices.Length, BufferUsage.WriteOnly);
vertexBuffer.SetData(vertices);
}
private void SetUpCamera()
{
cameraPos = new Vector3(-25, 13, 75);
viewMatrix = Matrix.CreateLookAt(cameraPos, new Vector3(0, 2, -12), new Vector3(0, 1, 0));
projectionMatrix = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver4, device.Viewport.AspectRatio, 1.0f, 5000.0f);
}
protected override void UnloadContent()
{
}
protected override void Update(GameTime gameTime)
{
if (GamePad.GetState(PlayerIndex.One).Buttons.Back == ButtonState.Pressed)
this.Exit();
UpdateKeyboard();
base.Update(gameTime);
}
protected override void Draw(GameTime gameTime)
{
device.Clear(ClearOptions.Target | ClearOptions.DepthBuffer, Color.DarkSlateBlue, 1.0f, 0);
effect.CurrentTechnique = effect.Techniques["Simplest"];
effect.Parameters["xViewProjection"].SetValue(viewMatrix * projectionMatrix * gameWorldRotation);
effect.Parameters["xTexture"].SetValue(streetTexture);
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(vertexBuffer);
device.DrawPrimitives(PrimitiveType.TriangleStrip, 0, 10);
}
base.Draw(gameTime);
}
}
I assume it's a projection issue. This line, taken from your code, strengthens my assumption:
viewMatrix * projectionMatrix * gameWorldRotation
tl;dr, the correct order is:
gameWorldRotation * viewMatrix * projectionMatrix
Please remember that order matters when multiplying matrices. In mathematical terms:
Matrix multiplication is not commutative!
Those three matrices map a vector to three different coordinate systems, the world, view and projection space. Usually vertices are defined in object space. Multiplying a vector with the world (view, projection) matrix brings your vector to world (view, projection) space:
object space => world space => view space => projection space
XNA uses a row vector layout (as opposed to column vectors). That means a vector is declared horizontally (x, y, z). Skipping the dry details, it means that when transforming a vector (multiplying the vector with a matrix), the vector is the left operand, while the matrix is the right operand:
A := [3x3 Matrix]
(x, y, z) * A = (x', y', z') // The result is another 3D vector
Now in order to apply all three matrices we simply use the result of the previous transformation as input for the next transform:
W .... world matrix
V .... view matrix
P .... projection matrix
x .... vector
x' ... transformed vector
x' = ((x * W) * V) * P
Finally, matrix multiplication is associative (braces don't matter). And that's why we can combine the world, view and projection matrix into a single matrix before sending it to the device:
x' = ((x * W) * V) * P = x * W * V * P = x * (W * V * P)
World * View * Projection. That's all you need. (And maybe some basic matrix math for your future work.)
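To make that concrete, here is a minimal sketch of the Draw method from the question with only the multiplication order changed (assuming the "Simplest" technique multiplies each vertex position by the xViewProjection parameter, as the posted code already relies on):
protected override void Draw(GameTime gameTime)
{
    device.Clear(ClearOptions.Target | ClearOptions.DepthBuffer, Color.DarkSlateBlue, 1.0f, 0);
    effect.CurrentTechnique = effect.Techniques["Simplest"];
    // World * View * Projection: rotate the geometry first, then transform it
    // into camera space, then project it onto the screen.
    effect.Parameters["xViewProjection"].SetValue(gameWorldRotation * viewMatrix * projectionMatrix);
    effect.Parameters["xTexture"].SetValue(streetTexture);
    foreach (EffectPass pass in effect.CurrentTechnique.Passes)
    {
        pass.Apply();
        device.SetVertexBuffer(vertexBuffer);
        device.DrawPrimitives(PrimitiveType.TriangleStrip, 0, 10);
    }
    base.Draw(gameTime);
}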