OpenCV: How to use cvSobel?

I'm trying to find the gradient direction from the edges using OpenCV 2.4.5, but I'm having a problem with cvSobel(); below are the error message and my code. I read somewhere that it might be due to a floating-point conversion, but I have no idea how to fix it. Any help?
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2\opencv.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <iostream>
#include <stdlib.h>
#include "stdio.h"
using namespace cv;
using namespace std;
int main()
{
    Mat im = imread("test1.jpg");
    if (im.empty()) {
        cout << "Cannot load image!" << endl;
    }
    Mat *dx, *dy;
    dx = new Mat( Mat::zeros(im.rows, im.cols, 1));
    dy = new Mat( Mat::zeros(im.rows, im.cols, 1));
    imshow("Image", im);
    // Convert image to grayscale
    Mat im_gray;
    cvtColor(im, im_gray, CV_RGB2GRAY);
    imshow("Gray", im_gray);
    //trying to find the direction, but gives errors here
    cvSobel(&im_gray, dx, 1, 0, 3);
    waitKey(0);
    return 0;
}

You are mixing the C++ and C APIs. cv::Mat is from the C++ API and CvArr* is from the C API.
Here you are using the C API function cvSobel on C++ classes:
//trying to find the direction, but gives errors here
cvSobel(&im_gray, dx, 1,0,3);
What happens if you do
cv::Sobel( im_gray, dx, im_gray.depth(), 1, 0, 3);
EDIT
and declare
Mat dx;
Mat dy;
I think this might solve your problem; I'm actually quite surprised your code compiles.
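For reference, here is a minimal corrected sketch of the whole program (untested). CV_32F is chosen as the derivative depth so that negative gradient values are not clipped, and cv::phase is one way to get the gradient direction the question asks about:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat im = imread("test1.jpg");
    if (im.empty()) {
        cout << "Cannot load image!" << endl;
        return -1;
    }
    Mat im_gray;
    cvtColor(im, im_gray, CV_RGB2GRAY);

    // Derivatives in x and y; CV_32F keeps the sign of the gradient
    Mat dx, dy;
    Sobel(im_gray, dx, CV_32F, 1, 0, 3);
    Sobel(im_gray, dy, CV_32F, 0, 1, 3);

    // Per-pixel gradient direction, in degrees
    Mat direction;
    phase(dx, dy, direction, true);

    imshow("Image", im);
    waitKey(0);
    return 0;
}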

Related

How to remove the floor surface from PointCloud2 data with pcl_ros

First, thanks for reading.
I am trying to remove the floor from the point cloud data;
this is the code that I wrote to remove the floor points.
#include <iostream>
#include <cmath>
#include <vector>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>
#include <velodyne_pointcloud/point_types.h>
#include <pcl/common/common.h>
#include <pcl/common/centroid.h>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <set>
#include <pcl/io/pcd_io.h>
#include <boost/format.hpp>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/segmentation/sac_segmentation.h>
struct VelodynePointXYZIRT
{
    PCL_ADD_POINT4D
    PCL_ADD_INTENSITY;
    uint16_t ring;
    float time;
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;

POINT_CLOUD_REGISTER_POINT_STRUCT (VelodynePointXYZIRT,
    (float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity)
    (uint16_t, ring, ring) (float, time, time)
)

ros::Publisher pub1;
using PointXYZIRT = VelodynePointXYZIRT;

void help (const sensor_msgs::PointCloud2ConstPtr& scan)
{
    // Convert the sensor_msgs/PointCloud2 data to pcl/PointCloud
    pcl::PointCloud<VelodynePointXYZIRT>::Ptr cloud(new pcl::PointCloud<VelodynePointXYZIRT>());
    pcl::fromROSMsg (*scan, *cloud);

    pcl::ModelCoefficients coefficients;
    pcl::PointIndices inliers;
    // Create the segmentation object
    pcl::SACSegmentation< pcl::PointCloud<PointXYZIRT>> seg;
    // Optional
    seg.setOptimizeCoefficients (true);
    // Mandatory
    seg.setModelType (pcl::SACMODEL_PLANE);
    seg.setMethodType (pcl::SAC_RANSAC);
    seg.setDistanceThreshold (0.01);
    seg.setInputCloud (cloud.makeShared ());
    seg.segment (inliers, coefficients);

    // Publish the model coefficients
    pcl_msgs::ModelCoefficients ros_coefficients;
    pcl_conversions::fromPCL(coefficients, ros_coefficients);
    pub1.publish (ros_coefficients);
}

int main (int argc, char** argv)
{
    // Initialize ROS
    ros::init (argc, argv, "my_pcl_tutorial");
    ros::NodeHandle nh;
    // Create a ROS subscriber for the input point cloud
    ros::Subscriber sub = nh.subscribe ("input", 1, help);
    // Create a ROS publisher for the output point cloud
    //#pub1 = nh.advertise<sensor_msgs::PointCloud2> ("output", 1);
    pub1 = nh.advertise<pcl_msgs::ModelCoefficients> ("output", 1);
    // Spin
    ros::spin ();
}
As I have to put the ring data in the code, I made the struct defining the Velodyne LiDAR point type, but when I run catkin_make in ROS, this error comes up:
error: ‘pcl::PointCloud<VelodynePointXYZIRT>::Ptr {aka class boost::shared_ptr<pcl::PointCloud<VelodynePointXYZIRT> >}’ has no member named ‘makeShared’
seg.setInputCloud (cloud.makeShared ());
Is there a way to visualize the data?
My reference for writing the code is this site: https://adioshun.gitbooks.io/pcl-tutorial/content/part-1/part01-chapter05/part01-chapter05-practice.html
Below is my previous code for transforming the LiDAR data; I used it as the basis for my own code.
#include <iostream>
#include <cmath>
#include <vector>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>
#include <velodyne_pointcloud/point_types.h>
#include <pcl/common/common.h>
#include <pcl/common/centroid.h>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <set>
#include <pcl/io/pcd_io.h>
#include <boost/format.hpp>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>
#define PI 3.14159265359
using namespace std;

struct VelodynePointXYZIRT
{
    PCL_ADD_POINT4D
    PCL_ADD_INTENSITY;
    uint16_t ring;
    float time;
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;

POINT_CLOUD_REGISTER_POINT_STRUCT (VelodynePointXYZIRT,
    (float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity)
    (uint16_t, ring, ring) (float, time, time)
)

ros::Publisher pub1;
float theta_r = 45* M_PI/ 180; // rotate by the angle in radians (180-degree rotation)
using PointXYZIRT = VelodynePointXYZIRT;

void input(const sensor_msgs::PointCloud2ConstPtr& scan)
{
    // Msg to pointcloud: convert the ROS msg to PCL cloud data
    pcl::PointCloud<VelodynePointXYZIRT>::Ptr cloud(new pcl::PointCloud<VelodynePointXYZIRT>());
    pcl::fromROSMsg(*scan, *cloud);

    // Rotation transformation matrix
    Eigen::Matrix4f transform_1 = Eigen::Matrix4f::Identity();
    // Define a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
    transform_1 (0,0) = std::cos (theta_r);
    transform_1 (0,2) = std::sin (theta_r);
    transform_1 (2,0) = -std::sin (theta_r);
    transform_1 (2,2) = std::cos (theta_r);
    // (row, column)

    // Executing the transformation
    pcl::PointCloud<VelodynePointXYZIRT>::Ptr transformed_cloud (new pcl::PointCloud<PointXYZIRT>());
    pcl::transformPointCloud (*cloud, *transformed_cloud, transform_1);

    pcl::PCLPointCloud2 cloud_p;
    pcl::toPCLPointCloud2(*transformed_cloud, cloud_p);
    sensor_msgs::PointCloud2 output;
    pcl_conversions::fromPCL(cloud_p, output);
    output.header.frame_id = "velodyne";
    pub1.publish(output);
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "input");
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe<sensor_msgs::PointCloud2> ("vlp202", 100, input);
    pub1 = nh.advertise<sensor_msgs::PointCloud2> ("vlp203", 100);
    ros::spin();
}

// If you have to make your own custom point type in PCL, you can look at this code to see how to do it!
// Also, the ring data that you have to use is from LIO-SAM's imageProjection.cpp.
So, to summarize:
I want to remove the floor points from a recorded bag,
so I wrote this code to remove the floor data.
The "cloud" object is already a shared pointer, so remove the makeShared() call. Change
seg.setInputCloud (cloud.makeShared ());
to
seg.setInputCloud (cloud);
For visualization, you could publish the result as a ROS message on a topic and visualize it with RViz.
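A minimal sketch of that (untested; it assumes a second publisher, here called pub2, advertised as sensor_msgs::PointCloud2 in main): after segmenting, drop the plane inliers with pcl::ExtractIndices and publish the remaining points so RViz can display them. Note also that pcl::SACSegmentation is templated on the point type, so the declaration should be pcl::SACSegmentation<PointXYZIRT>, not pcl::SACSegmentation<pcl::PointCloud<PointXYZIRT>>.
#include <pcl/filters/extract_indices.h>

// inside help(), after setting up seg as above:
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
pcl::ModelCoefficients coefficients;
seg.setInputCloud (cloud);
seg.segment (*inliers, coefficients);

// Keep everything that is NOT on the floor plane
pcl::ExtractIndices<PointXYZIRT> extract;
extract.setInputCloud (cloud);
extract.setIndices (inliers);
extract.setNegative (true);
pcl::PointCloud<PointXYZIRT>::Ptr no_floor (new pcl::PointCloud<PointXYZIRT>());
extract.filter (*no_floor);

// Publish the floor-free cloud for RViz (pub2 is a hypothetical
// nh.advertise<sensor_msgs::PointCloud2>("no_floor", 1) publisher)
sensor_msgs::PointCloud2 output;
pcl::toROSMsg (*no_floor, output);
output.header = scan->header; // keep the original frame so RViz places it correctly
pub2.publish (output);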

Run tensorflow model to achieve super resolution with opencv dnn.hpp

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/imgproc.hpp"
#include<opencv2/opencv.hpp>
#include<opencv2/dnn.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const size_t inWidth = 300;
const size_t inHeight = 300;
const float WHRatio = inWidth / (float)inHeight;
const char* classNames[] = { "background","face" };
void saveMat(cv::Mat inputMat, char* filename);

int main() {
    String model = "opt_srcnn_vgg_test.pb";
    dnn::Net net = cv::dnn::readNetFromTensorflow(model);
    Mat img = imread("test.bmp");
    Mat input;
    img.copyTo(input);
    cvtColor(input, input, CV_BGR2GRAY); // turn the RGB image to gray
    cout << "load finish!" << endl;
    Mat inputBlob = dnn::blobFromImage(input, 1./255, Size(img.cols, img.rows));
    imshow("input", input);
    waitKey(0);
    cout << inputBlob.size;
    net.setInput(inputBlob, "input"); // set the network input
    cout << "feed finish!" << endl;
    Mat pred = net.forward();
    resize(pred, pred, Size(img.cols, img.rows));
    imshow("result", pred);
    waitKey(0);
    return 0;
}
We tried to use the OpenCV dnn module with a TensorFlow-trained .pb file to achieve super resolution. Our expected result is Figure 1, but we get the result of Figure 2 (note: we have verified that the .pb file itself is error-free). Why is this? Thank you!

HDR image reading and writing in opencv

I wrote code for HDR image reading in OpenCV. Whenever I try to compile it, I get an error of this type:
'TonemapDurand' was not declared in this scope
#include"opencv2/opencv.hpp"
#include "vector"
#include "bits/stdc++.h"
#include "fstream"
using namespace cv;
int main(int argc, char** argv )
{
    vector<Mat> images;
    Mat image;
    image = imread( argv[1], 1 );
    images.push_back(image);
    Mat ldr;
    Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
    tonemap->process(images[0], ldr);
    imwrite("ldr.png", ldr * 255);
    waitKey(0);
    return 0;
}
It looks like there is no HDR support in OpenCV 2.4.9, as you can see from here.
You have to install OpenCV 3 to do your experiments with HDR.
There is a nice blog post on using HDR in OpenCV here.
It also looks like you have missed an include in your code:
#include <opencv2/photo.hpp>
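A minimal sketch of a working version under OpenCV 3 (untested; the input file name comes from argv as in the original). Two things matter: the photo.hpp include, and loading the file with its depth preserved, since the tonemapper expects a floating-point HDR image rather than the 8-bit image that imread(..., 1) returns:
#include <opencv2/opencv.hpp>
#include <opencv2/photo.hpp> // TonemapDurand lives here in OpenCV 3
using namespace cv;

int main(int argc, char** argv)
{
    // Load the HDR file without converting it to 8 bit
    Mat hdr = imread(argv[1], IMREAD_ANYDEPTH | IMREAD_ANYCOLOR);

    Mat ldr;
    Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
    tonemap->process(hdr, ldr); // ldr comes out as CV_32F in [0,1]

    imwrite("ldr.png", ldr * 255);
    return 0;
}
(Note: in OpenCV 4, TonemapDurand was moved out of the main photo module into the opencv_contrib xphoto module.)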

Split a BGR matrix without using the split() function

I am programming with Visual Studio 2012 and the OpenCV library, version 2.4.6.
Can someone help me with splitting a BGR image into three images, one for each channel?
I know that there is the split function in OpenCV, but it causes an unhandled exception, probably because I have a 64-bit processor with the 32-bit library, or perhaps because of the library version. So I want to know how to iterate over the pixel values of a BGR matrix without using split().
Thanks in advance.
If you don't want to use split(), you can read each B, G, R pixel value from your source image and write it to the corresponding destination image, each of which should be single-channel:
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main( int argc, const char** argv ){
    Mat src = imread("ball.jpg", 1);

    // One single-channel destination image per channel
    Mat r(src.rows, src.cols, CV_8UC1);
    Mat g(src.rows, src.cols, CV_8UC1);
    Mat b(src.rows, src.cols, CV_8UC1);

    // OpenCV stores color pixels in B,G,R order
    for(int i = 0; i < src.rows; i++){
        for(int j = 0; j < src.cols; j++){
            Vec3b pixel = src.at<Vec3b>(i, j);
            b.at<uchar>(i,j) = pixel[0];
            g.at<uchar>(i,j) = pixel[1];
            r.at<uchar>(i,j) = pixel[2];
        }
    }

    imshow("src", src);
    imshow("r", r);
    imshow("g", g);
    imshow("b", b);
    waitKey(0);
    return 0;
}
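As a side note on the design: per-pixel at<>() calls are convenient but relatively slow, because the element address is recomputed on every access. The same loop can be written with row pointers (a sketch under the same assumptions as above):
for(int i = 0; i < src.rows; i++){
    const Vec3b* s = src.ptr<Vec3b>(i); // one pointer per row
    uchar* pb = b.ptr<uchar>(i);
    uchar* pg = g.ptr<uchar>(i);
    uchar* pr = r.ptr<uchar>(i);
    for(int j = 0; j < src.cols; j++){
        pb[j] = s[j][0];
        pg[j] = s[j][1];
        pr[j] = s[j][2];
    }
}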

How to obtain a contour from points with OpenCV

I'm trying to obtain a ROI from an image using VC++ and OpenCV.
I managed to display an image, get the coordinates of a point when I click on it, store these coordinates in a vector and draw lines between these points on my image.
Here is my code:
//Includes
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
static int app;
static vector<Point2f> cont(6);
static Mat img = imread("C:/img.jpg",0);
void on_mouse(int, int, int, int, void* );
int main()
{
    app = 0;
    namedWindow("myWindow", CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback("myWindow", on_mouse, 0);
    imshow("myWindow", img);
    waitKey(0);
}

void on_mouse(int evt, int x, int y, int flags, void* param)
{
    if(evt == CV_EVENT_LBUTTONDOWN)
    {
        Point pt(x,y);
        if(app < 6)
        {
            cont[app] = pt;
            app++;
        }
        cout << "Coordinates of point pt: " << x << "," << y << endl;
        for (int i = 0; i < 6; i++)
        {
            cout << cont[i] << endl;
        }
    }
    if(evt == CV_EVENT_RBUTTONDOWN)
    {
        for (int j = 0; j < 5; j++)
        {
            line(img, cont[j], cont[j+1], CV_RGB(255,0,0), 2);
        }
        line(img, cont[5], cont[0], CV_RGB(255,0,0), 2);
        imshow("myWindow", img);
    }
}
What I would like to obtain is a vector that contains the coordinates of all the points of the contour, and ultimately a binary matrix the size of my image that contains 0 if the pixel is not in the contour and 1 otherwise.
Thanks for your help.
Make a single-element vector< vector<Point> > and then use drawContours with CV_FILLED. Then you will have the binary matrix you wanted.
I currently don't have an IDE at hand, but the code will be like the following. Note that drawContours expects integer Point coordinates, so the Point2f values are converted first, and the output matrix should start out zeroed:
vector<Point> cont_int;
for (size_t i = 0; i < cont.size(); i++)
    cont_int.push_back(cont[i]); // Point2f -> Point conversion
vector< vector<Point> > contours;
contours.push_back(cont_int); // your cont
Mat output = Mat::zeros(img.rows, img.cols, CV_8UC1); // your img dimensions
drawContours(output, contours, 0, Scalar(1), CV_FILLED); // now you have the binary image
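If you also need the coordinates of every pixel lying on the contour itself (the first thing the question asks for), one option is to draw only a 1-pixel-thick outline and collect the set pixels with findNonZero; a sketch under the same assumptions:
Mat outline = Mat::zeros(img.rows, img.cols, CV_8UC1);
drawContours(outline, contours, 0, Scalar(1), 1); // thickness 1: border only, not filled
vector<Point> contour_pixels;
findNonZero(outline, contour_pixels); // coordinates of every contour pixel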
