Cannot add 4 numbers to a Mat in OpenCV

I created a Mat object in OpenCV with dimensions N x 4, in which I want to put N coordinates:
[Px Py 1 0]
[Py Px 0 1]
For this I wrote the following code:
vector<Point2f> features1 , features2;
Mat features_1;
for (int i = 0, j = 0; i < features1.size(); ++i, j += 2)
{
    features_1.at<Vec3d>(j)     = {features1[i].x, features1[i].y, 1, 0};
    features_1.at<Vec3d>(j + 1) = {features1[i].y, -features1[i].x, 0, 1};
}
But at the first line of the loop I get the following error:
cv::Matx<_Tp, m, n>::Matx(_Tp, _Tp, _Tp, _Tp) [with _Tp = double; int m = 3; int n = 1]: Assertion `channels >= 4' failed.
How can I solve this?

It's probably easier to push_back() single elements to a Mat and do a reshape() later.
This will make a 2*N x 4 Mat:
vector<Point2f> features1 , features2;
Mat features_1;
for (size_t i = 0; i < features1.size(); ++i)
{
    features_1.push_back(features1[i].x);
    features_1.push_back(features1[i].y);
    features_1.push_back(1.f);
    features_1.push_back(0.f);
    features_1.push_back(features2[i].y);
    features_1.push_back(features2[i].x);
    features_1.push_back(0.f);
    features_1.push_back(1.f);
}
features_1 = features_1.reshape(1, 2*(int)features1.size()); // 2*N x 4 (use reshape(1, 4) for a 4 x 2*N Mat instead)

Where you wrote <Vec3d>, did you mean <Vec4d>?
Your code is not close to correct, but here is a minimal example of how your desired result can be achieved (if you don't have C++11, just use an old style iterator in the loop)
#include <iostream>
#include <vector>
using namespace std;
#include <opencv2/core/core.hpp>
using namespace cv;
int main()
{
vector<Point2f> feature1;
feature1.push_back(Point2f(1.0f, 2.0f));
feature1.push_back(Point2f(3.0f, 4.0f));
Mat features_1;
for(auto p : feature1)
{
features_1.push_back(Vec4d(p.x, p.y , 1 , 0));
features_1.push_back(Vec4d(p.y, -p.x , 0, 1));
}
// At this point the mat is (2*N, 1, CV_64FC4)
// Reshape if you want (2*N, 4, CV_64FC1)
features_1 = features_1.reshape(1);
cout << features_1;
}
The output of this is:
[1, 2, 1, 0;
2, -1, 0, 1;
3, 4, 1, 0;
4, -3, 0, 1]
However, if features_1 is already allocated and you can't just use push_back, then apart from releasing the Mat, you could do this (assuming features_1.isContinuous() is true):
#include <iostream>
#include <vector>
using namespace std;
#include <opencv2/core/core.hpp>
using namespace cv;
int main()
{
vector<Point2f> feature1;
feature1.push_back(Point2f(1.0f, 2.0f));
feature1.push_back(Point2f(3.0f, 4.0f));
Mat features_1(2 * feature1.size(), 4, CV_64FC1);
Vec4d* ptr = features_1.ptr<Vec4d>(0);
for(auto p : feature1)
{
*ptr++ = Vec4d(p.x, p.y , 1 , 0);
*ptr++ = Vec4d(p.y, -p.x , 0, 1);
}
cout << features_1 << endl;
}
The above produces the same result as the previous version.
If there is any possibility in the above example that features_1.isContinuous() is false, you can use an iterator to scan features_1:
auto ptr = features_1.begin<double>();
for(auto p : feature1)
{
*ptr++ = p.x;
*ptr++ = p.y;
*ptr++ = 1;
*ptr++ = 0;
*ptr++ = p.y;
*ptr++ = -p.x;
*ptr++ = 0;
*ptr++ = 1;
}

Related

OpenCV: Wrong result in calibrateHandEye function

I am working on a robot application in which I have a camera fixed to the robot gripper. To calculate the transformation matrix between the camera and the gripper, Hcg, I am using the new calibrateHandEye function provided in OpenCV 4.1.0.
I took 10 pictures of the chessboard with the camera mounted on the gripper and at the same time recorded the robot position.
The code I am working on:
// My_handeye.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdio>
#include "pch.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace std;
Mat eulerAnglesToRotationMatrix(Vec3f &theta);
Vec3f rotationMatrixToEulerAngles(Mat &R);
float rad2deg(float radian);
float deg2rad(float degree);
int main()
{
// Camera calibration information
std::vector<double> distortionCoefficients(5); // camera distortion
distortionCoefficients[0] = 2.4472856611074989e-01;
distortionCoefficients[1] = -8.1042032574246325e-01;
distortionCoefficients[2] = 0;
distortionCoefficients[3] = 0;
distortionCoefficients[4] = 7.8769462320821060e-01;
double f_x = 1.3624172121852105e+03; // Focal length in x axis
double f_y = 1.3624172121852105e+03; // Focal length in y axis (usually the same?)
double c_x = 960; // Camera primary point x
double c_y = 540; // Camera primary point y
cv::Mat cameraMatrix(3, 3, CV_32FC1);
cameraMatrix.at<float>(0, 0) = f_x;
cameraMatrix.at<float>(0, 1) = 0.0;
cameraMatrix.at<float>(0, 2) = c_x;
cameraMatrix.at<float>(1, 0) = 0.0;
cameraMatrix.at<float>(1, 1) = f_y;
cameraMatrix.at<float>(1, 2) = c_y;
cameraMatrix.at<float>(2, 0) = 0.0;
cameraMatrix.at<float>(2, 1) = 0.0;
cameraMatrix.at<float>(2, 2) = 1.0;
Mat rvec(3, 1, CV_32F), tvec(3, 1, CV_32F);
//
std::vector<Mat> R_gripper2base;
std::vector<Mat> t_gripper2base;
std::vector<Mat> R_target2cam;
std::vector<Mat> t_target2cam;
Mat R_cam2gripper = (Mat_<float>(3, 3));
Mat t_cam2gripper = (Mat_<float>(3, 1));
vector<String> fn;
glob("images/*.bmp", fn, false);
vector<Mat> images;
size_t num_images = fn.size(); //number of bmp files in images folder
Size patternsize(6, 8); //number of centers
vector<Point2f> centers; //this will be filled by the detected centers
float cell_size = 30;
vector<Point3f> obj_points;
R_gripper2base.reserve(num_images);
t_gripper2base.reserve(num_images);
R_target2cam.reserve(num_images);
t_target2cam.reserve(num_images);
for (int i = 0; i < patternsize.height; ++i)
for (int j = 0; j < patternsize.width; ++j)
obj_points.push_back(Point3f(float(j*cell_size),
float(i*cell_size), 0.f));
for (size_t i = 0; i < num_images; i++)
images.push_back(imread(fn[i]));
Mat frame;
for (size_t i = 0; i < num_images; i++)
{
frame = imread(fn[i]); //source image
bool patternfound = findChessboardCorners(frame, patternsize, centers);
if (patternfound)
{
drawChessboardCorners(frame, patternsize, Mat(centers), patternfound);
//imshow("window", frame);
//int key = cv::waitKey(0) & 0xff;
solvePnP(Mat(obj_points), Mat(centers), cameraMatrix, distortionCoefficients, rvec, tvec);
Mat R;
Rodrigues(rvec, R); // R is 3x3
R_target2cam.push_back(R);
t_target2cam.push_back(tvec);
Mat T = Mat::eye(4, 4, R.type()); // T is 4x4
T(Range(0, 3), Range(0, 3)) = R * 1; // copies R into T
T(Range(0, 3), Range(3, 4)) = tvec * 1; // copies tvec into T
cout << "T = " << endl << " " << T << endl << endl;
}
cout << patternfound << endl;
}
Vec3f theta_01{ deg2rad(-153.61), deg2rad(8.3), deg2rad(-91.91) };
Vec3f theta_02{ deg2rad(-166.71), deg2rad(3.04), deg2rad(-93.31) };
Vec3f theta_03{ deg2rad(-170.04), deg2rad(24.92), deg2rad(-88.29) };
Vec3f theta_04{ deg2rad(-165.71), deg2rad(24.68), deg2rad(-84.85) };
Vec3f theta_05{ deg2rad(-160.18), deg2rad(-15.94),deg2rad(-56.24) };
Vec3f theta_06{ deg2rad(175.68), deg2rad(10.95), deg2rad(180) };
Vec3f theta_07{ deg2rad(175.73), deg2rad(45.78), deg2rad(-179.92) };
Vec3f theta_08{ deg2rad(-165.34), deg2rad(47.37), deg2rad(-166.25) };
Vec3f theta_09{ deg2rad(-165.62), deg2rad(17.95), deg2rad(-166.17) };
Vec3f theta_10{ deg2rad(-151.99), deg2rad(-14.59),deg2rad(-94.19) };
Mat robot_rot_01 = eulerAnglesToRotationMatrix(theta_01);
Mat robot_rot_02 = eulerAnglesToRotationMatrix(theta_02);
Mat robot_rot_03 = eulerAnglesToRotationMatrix(theta_03);
Mat robot_rot_04 = eulerAnglesToRotationMatrix(theta_04);
Mat robot_rot_05 = eulerAnglesToRotationMatrix(theta_05);
Mat robot_rot_06 = eulerAnglesToRotationMatrix(theta_06);
Mat robot_rot_07 = eulerAnglesToRotationMatrix(theta_07);
Mat robot_rot_08 = eulerAnglesToRotationMatrix(theta_08);
Mat robot_rot_09 = eulerAnglesToRotationMatrix(theta_09);
Mat robot_rot_10 = eulerAnglesToRotationMatrix(theta_10);
const Mat robot_tr_01 = (Mat_<float>(3, 1) << 781.2, 338.59, 903.48);
const Mat robot_tr_02 = (Mat_<float>(3, 1) << 867.65, 382.52, 884.42);
const Mat robot_tr_03 = (Mat_<float>(3, 1) << 856.91, 172.99, 964.61);
const Mat robot_tr_04 = (Mat_<float>(3, 1) << 748.81, 146.75, 1043.29);
const Mat robot_tr_05 = (Mat_<float>(3, 1) << 627.66, 554.08, 920.85);
const Mat robot_tr_06 = (Mat_<float>(3, 1) << 715.06, 195.96, 889.38);
const Mat robot_tr_07 = (Mat_<float>(3, 1) << 790.9, 196.29, 1117.38);
const Mat robot_tr_08 = (Mat_<float>(3, 1) << 743.5, 283.93, 1131.92);
const Mat robot_tr_09 = (Mat_<float>(3, 1) << 748.9, 288.19, 910.58);
const Mat robot_tr_10 = (Mat_<float>(3, 1) << 813.18, 400.44, 917.16);
R_gripper2base.push_back(robot_rot_01);
R_gripper2base.push_back(robot_rot_02);
R_gripper2base.push_back(robot_rot_03);
R_gripper2base.push_back(robot_rot_04);
R_gripper2base.push_back(robot_rot_05);
R_gripper2base.push_back(robot_rot_06);
R_gripper2base.push_back(robot_rot_07);
R_gripper2base.push_back(robot_rot_08);
R_gripper2base.push_back(robot_rot_09);
R_gripper2base.push_back(robot_rot_10);
t_gripper2base.push_back(robot_tr_01);
t_gripper2base.push_back(robot_tr_02);
t_gripper2base.push_back(robot_tr_03);
t_gripper2base.push_back(robot_tr_04);
t_gripper2base.push_back(robot_tr_05);
t_gripper2base.push_back(robot_tr_06);
t_gripper2base.push_back(robot_tr_07);
t_gripper2base.push_back(robot_tr_08);
t_gripper2base.push_back(robot_tr_09);
t_gripper2base.push_back(robot_tr_10);
calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
Vec3f R_cam2gripper_r = rotationMatrixToEulerAngles(R_cam2gripper);
cout << "R_cam2gripper = " << endl << " " << R_cam2gripper << endl << endl;
cout << "R_cam2gripper_r = " << endl << " " << R_cam2gripper_r << endl << endl;
cout << "t_cam2gripper = " << endl << " " << t_cam2gripper << endl << endl;
}
Mat eulerAnglesToRotationMatrix(Vec3f &theta)
{
// Calculate rotation about x axis
Mat R_x = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(theta[0]), -sin(theta[0]),
0, sin(theta[0]), cos(theta[0])
);
// Calculate rotation about y axis
Mat R_y = (Mat_<double>(3, 3) <<
cos(theta[1]), 0, sin(theta[1]),
0, 1, 0,
-sin(theta[1]), 0, cos(theta[1])
);
// Calculate rotation about z axis
Mat R_z = (Mat_<double>(3, 3) <<
cos(theta[2]), -sin(theta[2]), 0,
sin(theta[2]), cos(theta[2]), 0,
0, 0, 1);
// Combined rotation matrix
Mat R = R_z * R_y * R_x;
return R;
}
float rad2deg(float radian) {
double pi = 3.14159;
return(radian * (180 / pi));
}
float deg2rad(float degree) {
double pi = 3.14159;
return(degree * (pi / 180));
}
// Checks if a matrix is a valid rotation matrix.
bool isRotationMatrix(Mat &R)
{
Mat Rt;
transpose(R, Rt);
Mat shouldBeIdentity = Rt * R;
Mat I = Mat::eye(3, 3, shouldBeIdentity.type());
return norm(I, shouldBeIdentity) < 1e-6;
}
// Calculates rotation matrix to euler angles
// The result is the same as MATLAB except the order
// of the euler angles ( x and z are swapped ).
Vec3f rotationMatrixToEulerAngles(Mat &R)
{
assert(isRotationMatrix(R));
float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0));
bool singular = sy < 1e-6; // true when cos(pitch) is near zero (gimbal lock)
float x, y, z;
if (!singular)
{
x = atan2(R.at<double>(2, 1), R.at<double>(2, 2));
y = atan2(-R.at<double>(2, 0), sy);
z = atan2(R.at<double>(1, 0), R.at<double>(0, 0));
}
else
{
x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1));
y = atan2(-R.at<double>(2, 0), sy);
z = 0;
}
return Vec3f(x, y, z);
}
The result the function gives me is the following:
R_cam2gripper =
[0.3099803593003124, -0.8923086952824562, -0.3281727733547833;
0.7129271761196039, 0.4465219155360299, -0.5406967916458927;
0.6290047840821058, -0.0663579028402444, 0.7745641421680119]
R_cam2gripper_r =
[-0.0854626, -0.680272, 1.16065]
t_cam2gripper =
[-35.02063730299775;
-74.80633768251272;
-307.6725851251873]
I have 'good' results provided by other software. With those, the robot reaches exactly the points I pick in the camera image (I have a 3D camera, from which I get x, y, z in the camera frame), so they are certainly correct, but I am having trouble reproducing the same result with the OpenCV function.
Sorry for the long introduction to my problem. Any idea why the solution is not what it is supposed to be? My guess is that I have a problem understanding the angles or converting them, but I couldn't find any way to solve this. Any hint will be welcome!
I actually managed to solve this problem. The general idea was correct, but:
I was not correctly interpreting the rotation-vector notation the robot was giving me. It was necessary to multiply the actual values by a factor.
I created a new program that extracts, directly from the robot and the pictures, the matrices that the algorithm requires and writes these values to a YML file.
The CALIB_HAND_EYE_TSAI method wasn't giving me correct values, but with the four other methods the values seem to converge to the actual ones (see the sketch below).
Anyway, thank you for your help. I am stuck trying to get more precision out of the algorithm, but that's for another question.
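For reference, a minimal sketch (not from the original program) of comparing the hand-eye methods on the same input. It assumes R_gripper2base, t_gripper2base, R_target2cam and t_target2cam are filled exactly as in the code above; the method list is the one shipped with OpenCV 4.1.0:
#include <opencv2/calib3d.hpp>
#include <iostream>
#include <utility>
#include <vector>
// Run calibrateHandEye with every available method and print the results side by side,
// so the TSAI solution can be compared against the other four.
void compareHandEyeMethods(const std::vector<cv::Mat>& R_gripper2base,
                           const std::vector<cv::Mat>& t_gripper2base,
                           const std::vector<cv::Mat>& R_target2cam,
                           const std::vector<cv::Mat>& t_target2cam)
{
    const std::pair<cv::HandEyeCalibrationMethod, const char*> methods[] = {
        { cv::CALIB_HAND_EYE_TSAI,       "TSAI" },
        { cv::CALIB_HAND_EYE_PARK,       "PARK" },
        { cv::CALIB_HAND_EYE_HORAUD,     "HORAUD" },
        { cv::CALIB_HAND_EYE_ANDREFF,    "ANDREFF" },
        { cv::CALIB_HAND_EYE_DANIILIDIS, "DANIILIDIS" }
    };
    for (const auto& m : methods)
    {
        cv::Mat R_cam2gripper, t_cam2gripper;
        cv::calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
                             R_cam2gripper, t_cam2gripper, m.first);
        std::cout << m.second << ":" << std::endl
                  << "R_cam2gripper = " << std::endl << R_cam2gripper << std::endl
                  << "t_cam2gripper = " << std::endl << t_cam2gripper << std::endl << std::endl;
    }
}
Calling compareHandEyeMethods(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam) at the end of main (in place of the single calibrateHandEye call) prints all five solutions for comparison.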

Having issues with opencv/dnn with caffe model

I am trying to make use of the OpenPose example in OpenCV, using a Caffe model and OpenCV's dnn module (opencv2/dnn/dnn.hpp).
The tutorial I have been following: https://www.learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
As the tutorial says, we need 2 files for the network:
1 - prototxt - https://github.com/spmallick/learnopencv/blob/master/OpenPose/pose/coco/pose_deploy_linevec.prototxt
2 - caffe model - posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel
The ROS node that I made following the tutorial:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <sensor_msgs/image_encodings.h>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
static const std::string OPENCV_WINDOW = "Image window";
#define COCO
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0},{0,14},
{14,16}, {0,15}, {15,17}
};
static const std::string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
static const std::string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
class ImageConverter
{
ros::NodeHandle nh_;
image_transport::ImageTransport it_;
image_transport::Subscriber image_sub_;
public:
ImageConverter()
: it_(nh_)
{
image_sub_ = it_.subscribe("/zed/rgb/image_raw_color", 1, &ImageConverter::imageCb, this);
}
~ImageConverter()
{
cv::destroyWindow(OPENCV_WINDOW);
}
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
detect_people(cv_ptr->image);
cv::waitKey(3);
}
void detect_people(cv::Mat msg)
{
int inWidth = msg.cols;
int inHeight = msg.rows;
float thresh = 0.1;
cv::Mat frame;
msg.copyTo(frame);
cv::Mat frameCopy = frame.clone();
int frameWidth = frame.cols;
int frameHeight = frame.rows;
cv::dnn::Net net = cv::dnn::readNetFromCaffe("pose_deploy_linevec.prototxt" ,"pose_iter_440000.caffemodel");
cv::Mat inpBlob = blobFromImage(frame, 1.0/255, cv::Size(inWidth, inHeight), cv::Scalar(0, 0, 0), false, false);
net.setInput(inpBlob);
cv::Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
std::vector<cv::Point> points(nPoints);
for (int n=0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
cv::Mat probMap(H, W, CV_32F, output.ptr(0,n));
cv::Point2f p(-1,-1);
cv::Point maxLoc;
double prob;
cv::minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W ;
p.y *= (float)frameHeight / H ;
cv::circle(frameCopy, cv::Point((int)p.x, (int)p.y), 8, Scalar(0,255,255), -1);
cv::putText(frameCopy, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
int nPairs = sizeof(POSE_PAIRS)/sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x<=0 || partA.y<=0 || partB.x<=0 || partB.y<=0)
continue;
cv::line(frame, partA, partB, cv::Scalar(0,255,255), 8);
cv::circle(frame, partA, 8, cv::Scalar(0,0,255), -1);
cv::circle(frame, partB, 8, cv::Scalar(0,0,255), -1);
}
cv::imshow("Output-Skeleton", frame);
}
};
int main(int argc, char** argv)
{
ros::init(argc, argv, "image_converter");
ros::NodeHandle nh_;
ros::Publisher pub;
ImageConverter ic;
ros::spin();
return 0;
}
The code compiles without any errors, but when I run the node I get the following error:
error - OpenCV Error: Unspecified error (FAILED: fs.is_open(). Can't open "pose_deploy_linevec.prototxt") in ReadProtoFromTextFile, file /tmp/binarydeb/ros-kinetic-opencv3-3.3.1/modules/dnn/src/caffe/caffe_io.cpp, line 1119
terminate called after throwing an instance of 'cv::Exception'
what(): /tmp/binarydeb/ros-kinetic-opencv3-3.3.1/modules/dnn/src/caffe/caffe_io.cpp:1119: error: (-2) FAILED: fs.is_open(). Can't open "pose_deploy_linevec.prototxt" in function ReadProtoFromTextFile
Aborted (core dumped)
Please help me solve this issue.
This issue probably affects Windows users only.
Solve it by:
Using an absolute path when loading the prototxt file.
Adding the extension too (see the sketch after this answer). For example:
"pose/coco/pose_deploy_linevec.prototxt.txt"
I spent 3 hours debugging this myself. Hope it helps someone else.
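As a minimal sketch of that fix (the absolute paths below are placeholders, not the asker's real paths), it also helps to verify the files can actually be opened before handing them to readNetFromCaffe, so the failure points at the path rather than deep inside caffe_io.cpp:
#include <fstream>
#include <iostream>
#include <string>
#include <opencv2/dnn/dnn.hpp>
int main()
{
    // Hypothetical absolute paths -- replace with wherever the files live on your machine.
    const std::string protoFile   = "C:/models/pose/coco/pose_deploy_linevec.prototxt";
    const std::string weightsFile = "C:/models/pose/coco/pose_iter_440000.caffemodel";
    // Fail early with a clear message if either file cannot be opened.
    if (!std::ifstream(protoFile.c_str()).good() || !std::ifstream(weightsFile.c_str()).good())
    {
        std::cerr << "Cannot open model files -- check the paths." << std::endl;
        return 1;
    }
    cv::dnn::Net net = cv::dnn::readNetFromCaffe(protoFile, weightsFile);
    std::cout << "Network loaded." << std::endl;
    return 0;
}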
You are selecting the wrong file path.
Just replace this line:
static const std::string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
with the path of the prototxt file on your machine, like this:
static const std::string protoFile = "C:/Users/lenovo/Desktop/learnopencv-master/OpenPose/pose/coco/pose_deploy_linevec.prototxt";

Assertion failed in my OpenCV program

My program is another edition of the OpenCV Harris and Shi-Tomasi corner detection sample.
I just added a little code to change it from image corner detection to video corner detection. However, it throws an exception (cv::Exception) that I can't deal with.
This is my program:
#include "stdafx.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
double Harris_minVal;
double Harris_maxVal;
double ShiTomasi_minVal;
double ShiTomasi_maxVal;
int Harris_qualityLevel = 50;
int ShiTomasi_qualityLevel = 50;
int max_qualityLevel = 100;
RNG rng(12345);
const char* Harris_window = "Harris Corner Detection";
const char* ShiTomasi_window = "ShiTomasi Corner Detection";
Mat frame, frame_gray, Harris_dst, Harris_copy, ShiTomasi_dst, ShiTomasi_copy,Mc;
void HarrisFunction(int, void*);
void ShiTomasiFunction(int, void*);
int main()
{
VideoCapture capture(0);
while (1) {
capture >> frame;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
int blockSize = 3;
int kSize = 3;
Harris_dst = Mat::zeros(frame_gray.size(), CV_32FC(6));
Mc = Mat::zeros(frame_gray.size(), CV_32FC1);
cornerEigenValsAndVecs(frame_gray, Harris_dst, blockSize, kSize, BORDER_DEFAULT);
for (int j = 0; j < frame_gray.rows; j++) {
for (int i = 0; i < frame_gray.cols; i++) {
float lambda_1 = Harris_dst.at<Vec6f>(j, i)[0];
float lambda_2 = Harris_dst.at<Vec6f>(j, i)[1];
Mc.at<float>(j, i) = lambda_1*lambda_2 - 0.04f*pow((lambda_1 + lambda_2), 2);
}
}
minMaxLoc(Mc, &Harris_minVal, &Harris_maxVal, 0, 0, Mat());
namedWindow(Harris_window, WINDOW_AUTOSIZE);
createTrackbar("质量:", Harris_window, &Harris_qualityLevel, max_qualityLevel, HarrisFunction);
HarrisFunction(0, 0);
ShiTomasi_dst = Mat::zeros(frame_gray.size(), CV_32FC1);
cornerEigenValsAndVecs(frame_gray, ShiTomasi_dst, blockSize, kSize, BORDER_DEFAULT);
minMaxLoc(ShiTomasi_dst, &ShiTomasi_minVal, &ShiTomasi_maxVal, 0, 0, Mat());
namedWindow(ShiTomasi_window, WINDOW_AUTOSIZE);
createTrackbar(" 质量:", ShiTomasi_window, &ShiTomasi_qualityLevel, max_qualityLevel, ShiTomasiFunction);
ShiTomasiFunction(0, 0);
}
return 0;
}
void HarrisFunction(int, void*) {
Harris_copy = frame.clone();
if (Harris_qualityLevel < 1) {
Harris_qualityLevel = 1;
}
for (int j = 0; j <= frame_gray.rows; j++) {
for (int i = 0; i <= frame_gray.cols; i++) {
if (Mc.at<float>(j, i) > Harris_minVal + (Harris_maxVal - Harris_minVal)*(Harris_qualityLevel / max_qualityLevel))
{
circle(Harris_copy, Point(i, j), 4, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)),1,8,0);
}
}
}
imshow(Harris_window, Harris_copy);
waitKey(30);
}
void ShiTomasiFunction(int, void*) {
ShiTomasi_copy = frame.clone();
if (ShiTomasi_qualityLevel < 1) {
ShiTomasi_qualityLevel = 1;
}
for (int j = 0; j <= frame_gray.rows; j++) {
for (int i = 0; i <= frame_gray.cols; i++) {
if (ShiTomasi_dst.at<float>(j, i) > ShiTomasi_minVal + (ShiTomasi_maxVal - ShiTomasi_minVal)*(ShiTomasi_qualityLevel / max_qualityLevel))
{
circle(ShiTomasi_copy, Point(i, j), 4, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), 1, 8, 0);
}
}
}
imshow(ShiTomasi_window, ShiTomasi_copy);
waitKey(30);
}
After debugging, the problem occurs right before imshow(Harris_window, Harris_copy):
OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && (unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1<<3) - 1))*4) & 15) == elemSize1()) in cv::Mat::at, file d:\opencv\build\include\opencv2\core\mat.inl.hpp, line 894
The above is what the console window prints.

Implementing Lucas Kanade Method

I have a problem with the execution of my code.
I am trying to make this work:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main( )
{
Mat src1,src2;
namedWindow( "corner", CV_WINDOW_AUTOSIZE );
namedWindow( "result", CV_WINDOW_AUTOSIZE );
src1 = imread("RGB_32.png", CV_LOAD_IMAGE_COLOR);
src2 = imread("RGB_33.png", CV_LOAD_IMAGE_COLOR);
//*********************************************************************************
Mat im1,im2;
cvtColor(src1, im1, CV_RGB2GRAY);
cvtColor(src2, im2, CV_RGB2GRAY);
//******************ReduceImageSizeAndDetectCorner**********************************
Mat im2c,
tmp = im2;
pyrDown(tmp,im2c,Size( tmp.cols/2, tmp.rows/2));
Mat dst = Mat::zeros( im2c.size(), CV_32FC1 );
int blockSize = 3;
int apertureSize = 3;
double c = 0.09;
cornerHarris(im2c,dst,blockSize, apertureSize, c, BORDER_DEFAULT);
Mat dst_norm, dst_norm_scaled;
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
int w=23,nb_pt=0,l=0, alpha=143;
/// Drawing a circle around corners
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha )
{
circle( im2c, Point( i, j ), 0.5, Scalar(0,0,255), 4, 8, 0 );
nb_pt ++ ;
}
}
}
Mat C= Mat:: zeros(nb_pt,2,CV_32FC1) ;
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha ){
C.at <float> (l,0)=j;
C.at <float> (l,1)=i;
l++;
}
}
}
C=2*C;
//******************ImplementLucas&KanadeMethod**************************************
Mat1f Cx,Cy;
Mat Ix_m,Iy_m,It_m,It1,It2;
Cx = (Mat_<float>(2,2) << -1,1,-1,1);
Cy= (Mat_<float>(2,2) << -1,-1,1,1);
Mat Ct1 = (Mat_<float>(2,2) << -1,-1,-1,-1);
Mat Ct2 = Mat::ones( 2, 2, CV_8U );
filter2D(im1,Ix_m,-1,Cx,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,Iy_m,-1,Cy,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,It1,-1,Ct1,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,It2,-1,Ct2,Point(-1,-1),0,BORDER_DEFAULT);
add(It1,It2,It_m);
//initialiser le flot
//Mat u = Mat::zeros( 1, nb_pt, CV_32FC1 );
//Mat v = Mat::zeros( 1, nb_pt, CV_32FC1 );
Mat Ix,Ixd,Iy,Iyd,It,Itd,b,nu,A;
int u,v ;
cv::Scalar color(0,0,255);
int size = 10 ;
int thickness = 10 ;
for (int k=0 ; k < nb_pt ; ++k) {
int j= C.at <float> (k,0);
int i= C.at <float> (k,1);
Ix= Ix_m(Range(j-w,j+w),Range(i-w,i+w));
Iy= Iy_m(Range(j-w,j+w),Range(i-w,i+w));
It= It_m(Range(j-w,j+w),Range(i-w,i+w));
redim(Ix,Ixd);
redim(Iy,Iyd);
redim(It,Itd);
multi(Itd,b);
funct(Ixd,Iyd,A);
Mat inv = A.inv(DECOMP_SVD);
nu = inv * b ;
u= nu.at<float> (0,0);
v= nu.at<float> (0,1);
//cout << "u = "<< u << endl ;
//cout << "v = "<< v << endl ;
cvQuiver(src2,i,j,u,v,color, size, thickness);
}
imshow( "result", src2 );*/
waitKey();
}
I have a problem with the for loop. When k = 2, I get a "core dumped" error. But when I run the code case by case, that is to say I take the case k=0, then k=1, 2, ..., it works.
Any help please?

Extracting Hough lines intersection coordinates and getting the data into Notepad or Excel

I need help getting the coordinates of the lines that HoughLines produces and extracting them to an output file (Notepad, Excel, or any other output file).
I managed to obtain the lines, and based on my research on this site I found a post that tells how to obtain the intersection coordinates; however, due to my limited understanding I could not get that code to run alongside my original Hough code and write the intersection point coordinates to an output file.
Here is my original Hough code:
#pragma once
#include <C:\OpenCV2.2\include\opencv\cv.h>
#include <C:\OpenCV2.2\include\opencv\highgui.h>
#include <C:\OpenCV2.2\include\opencv2\core\core.hpp>
#include <C:\OpenCV2.2\include\opencv2\imgproc\imgproc.hpp>
#include <C:\OpenCV2.2\include\opencv2\highgui\highgui.hpp>
#include <stdio.h>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char* argv[])
{
cv::Mat dst_img, gray_img, contour_img, contrast_img;
cv::Mat src_img = cv::imread("C:\\Frame-1.bmp"); //Source image path
dst_img = src_img.clone();
dst_img.convertTo(contrast_img, -1, 1.5, 0);
cv::cvtColor(contrast_img, gray_img, CV_BGR2GRAY);
cv::Canny(gray_img, contour_img, 75, 225, 3);
vector<Vec2f> lines_;
HoughLines(contour_img, lines_, 1, CV_PI/180, 200);
for( size_t i = 0; i < lines_.size(); i++ )
{
float rho = lines_[i][0];
float theta = lines_[i][1];
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
Point pt1(cvRound(x0 + 1000*(-b)),
cvRound(y0 + 1000*(a)));
Point pt2(cvRound(x0 - 1000*(-b)),
cvRound(y0 - 1000*(a)));
cv::clipLine(gray_img.size(), pt1, pt2);
if(!dst_img.empty())
line( dst_img, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
cv::imwrite("result.bmp", dst_img);
}
namedWindow("My Image");
imshow("My Image", dst_img);
waitKey(0);
return 0;
}
And here is the link to the code that I wanted to put into my original code:
I am struck at finding the point of intersection of most lines in an image
Right now my original code draws Hough lines, exports the image (as result.bmp) and at the same time displays the image in a new window.
I just need to figure out how and where to put the new code, plus additional code to write the raw coordinate data to an output file like Notepad, most desirably in the same folder as result.bmp (the name of the output file could be anything, it just needs to be there).
Sorry if this sounds like a beginner's question (I really am one), and any help is much appreciated. Many thanks in advance.
Additional information: I am using OpenCV 2.2 and Microsoft Visual Studio Academic 2010.
EDIT: Below are all three pieces of code (Hough, coordinate extraction, and exporting data to Notepad), but as a complete beginner I don't know how to make them all work in a single program.
#pragma once
#include <C:\OpenCV2.2\include\opencv\cv.h>
#include <C:\OpenCV2.2\include\opencv\highgui.h>
#include <C:\OpenCV2.2\include\opencv2\core\core.hpp>
#include <C:\OpenCV2.2\include\opencv2\imgproc\imgproc.hpp>
#include <C:\OpenCV2.2\include\opencv2\highgui\highgui.hpp>
#include <stdio.h>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#define PointMinusPoint(P,Q,R) {(P).x = (Q).x - (R).x; (P).y = (Q).y - (R).y;}
#define PointCross(P,Q) (((P).x*(Q).y)-((P).y*(Q).x))
#define SIGN(X) (((X)>=0)? 1:-1 )
#define ABS(a) ((a) >= 0 ? (a) : (-(a)))
#define ROUND(a) ((SIGN(a)) * ( ( int )( ABS(a) + 0.5 ) ) )
typedef struct{
int x,y;
} MYintPOINT;
typedef struct {
MYintPOINT pStart;
MYintPOINT pEnd;
} MyLine;
using namespace std;
using namespace cv;
int main(int argc, char* argv[])
{
cv::Mat dst_img, gray_img, contour_img, contrast_img;
cv::Mat src_img = cv::imread("C:\\Frame-1.bmp"); //Source image path
dst_img = src_img.clone();
dst_img.convertTo(contrast_img, -1, 1.5, 0);
cv::cvtColor(contrast_img, gray_img, CV_BGR2GRAY);
cv::Canny(gray_img, contour_img, 75, 225, 3);
vector<Vec2f> lines_;
HoughLines(contour_img, lines_, 1, CV_PI/180, 200);
for( size_t i = 0; i < lines_.size(); i++ )
{
float rho = lines_[i][0];
float theta = lines_[i][1];
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
Point pt1(cvRound(x0 + 1000*(-b)),
cvRound(y0 + 1000*(a)));
Point pt2(cvRound(x0 - 1000*(-b)),
cvRound(y0 - 1000*(a)));
cv::clipLine(gray_img.size(), pt1, pt2);
if(!dst_img.empty())
line( dst_img, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
cv::imwrite("result.bmp", dst_img);
}
int findLinesIntersectionPoint(const MyLine*l1, const MyLine*l2, MYintPOINT *res){
MYintPOINT p = l1->pStart;
MYintPOINT dp;
MYintPOINT q = l2->pStart;
MYintPOINT dq;
MYintPOINT qmp; // q-p
int dpdq_cross; // 2 cross products
int qpdq_cross; // dp with dq, q-p with dq
float a;
PointMinusPoint(dp,l1->pEnd,l1->pStart);
PointMinusPoint(dq,l2->pEnd,l2->pStart);
PointMinusPoint(qmp,q,p);
dpdq_cross = PointCross(dp,dq);
if (!dpdq_cross){
// Parallel lines (cross product is zero), no unique intersection
return 0;
}
qpdq_cross = PointCross(qmp,dq);
a = (qpdq_cross*1.0f/dpdq_cross);
res->x = ROUND(p.x+a*dp.x);
res->y = ROUND(p.y+a*dp.y);
return 1;
}
string FileName= FileName_S.c_str();
string::size_type Extension = FileName_S.find_last_of('.'); // Find extension point
Mat mInputImg;
mInputImg= imread(FileName_S,1);
Size szInput= mInputImg.size();
const string DestinationFileName = FileName_S.substr(0, Extension) + "_ImageData.csv"; // Form the new name with container
ofstream myfile (DestinationFileName.c_str());
if (!myfile.is_open())
{
MessageBox(L"Unable to Open File");
}
string Text= format("Row, Col , Pixel Data,\n");
myfile << Text;
for (int Row = 0; Row < szInput.height; Row++)
{
for (int Col = 0; Col < szInput.width; Col++)
{
string Text= format("%d , %d , %d",Row,Col,mInputImg.at<uchar>(Row,Col));
myfile << Text;
myfile << "\n";
}
}
myfile.close();
namedWindow("My Image");
imshow("My Image", dst_img);
waitKey(0);
return 0;
}
It is very easy to export your data to a Notepad or Excel file. Here is code to export a Mat to a CSV file; format the string with whatever data you want to export. A sketch adapting this to the intersection coordinates follows after the snippet.
/*Exporting a Mat to Excel(.csv) file*/
string FileName= FileName_S.c_str();
string::size_type Extension = FileName_S.find_last_of('.'); // Find extension point
Mat mInputImg;
mInputImg= imread(FileName_S,1);
Size szInput= mInputImg.size();
const string DestinationFileName = FileName_S.substr(0, Extension) + "_ImageData.csv"; // Form the new name with container
ofstream myfile (DestinationFileName.c_str());
if (!myfile.is_open())
{
MessageBox(L"Unable to Open File");
}
string Text= format("Row, Col , Pixel Data,\n");
myfile << Text;
for (int Row = 0; Row < szInput.height; Row++)
{
for (int Col = 0; Col < szInput.width; Col++)
{
string Text= format("%d , %d , %d",Row,Col,mInputImg.at<uchar>(Row,Col));
myfile << Text;
myfile << "\n";
}
}
myfile.close();
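For the original question (intersections of the HoughLines result rather than raw pixel values), here is a minimal self-contained sketch. It assumes the lines are the vector<Vec2f> lines_ from the question's code; the helper names and the intersections.csv file name are my own choices, not from the original code:
#include <opencv2/core/core.hpp>
#include <fstream>
#include <vector>
#include <cmath>
using namespace std;
using namespace cv;
// Intersect two lines given in HoughLines form (rho, theta):
//   x*cos(theta) + y*sin(theta) = rho
// Returns false if the lines are (nearly) parallel.
static bool intersection(const Vec2f& l1, const Vec2f& l2, Point2f& pt)
{
    double a1 = cos(l1[1]), b1 = sin(l1[1]);
    double a2 = cos(l2[1]), b2 = sin(l2[1]);
    double det = a1 * b2 - a2 * b1;
    if (fabs(det) < 1e-6)
        return false;                      // parallel lines
    pt.x = (float)((l1[0] * b2 - l2[0] * b1) / det);   // Cramer's rule for the 2x2 system
    pt.y = (float)((a1 * l2[0] - a2 * l1[0]) / det);
    return true;
}
// Write every pairwise intersection of the detected lines to a CSV file.
static void writeIntersections(const vector<Vec2f>& lines_, const string& csvPath)
{
    ofstream out(csvPath.c_str());
    out << "line_i,line_j,x,y\n";
    for (size_t i = 0; i < lines_.size(); i++)
        for (size_t j = i + 1; j < lines_.size(); j++)
        {
            Point2f pt;
            if (intersection(lines_[i], lines_[j], pt))
                out << i << "," << j << "," << pt.x << "," << pt.y << "\n";
        }
}
Calling writeIntersections(lines_, "intersections.csv") right after the HoughLines call in the original program would write the coordinates next to result.bmp (assuming the program runs from that folder).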
